diff --git a/Makefile b/Makefile index f3ff2d575bbcad..da904a6f64a138 100644 --- a/Makefile +++ b/Makefile @@ -70,7 +70,7 @@ $(NODE_G_EXE): config.gypi out/Makefile $(MAKE) -C out BUILDTYPE=Debug V=$(V) ln -fs out/Debug/$(NODE_EXE) $@ -out/Makefile: common.gypi deps/uv/uv.gyp deps/http_parser/http_parser.gyp deps/zlib/zlib.gyp deps/v8/build/toolchain.gypi deps/v8/build/features.gypi deps/v8/tools/gyp/v8.gyp node.gyp config.gypi +out/Makefile: common.gypi deps/uv/uv.gyp deps/http_parser/http_parser.gyp deps/zlib/zlib.gyp deps/v8/gypfiles/toolchain.gypi deps/v8/gypfiles/features.gypi deps/v8/src/v8.gyp node.gyp config.gypi $(PYTHON) tools/gyp_node.py -f make config.gypi: configure @@ -103,7 +103,6 @@ distclean: -rm -rf deps/icu4c*.tgz deps/icu4c*.zip deps/icu-tmp -rm -f $(BINARYTAR).* $(TARBALL).* -rm -rf deps/v8/testing/gmock - -rm -rf deps/v8/testing/gtest check: test diff --git a/common.gypi b/common.gypi index 01ab2def6e0f29..0820f1cf32921d 100644 --- a/common.gypi +++ b/common.gypi @@ -31,6 +31,9 @@ # Don't bake anything extra into the snapshot. 'v8_use_external_startup_data%': 0, + # Don't use ICU data file (icudtl.dat) from V8, we use our own. 
+ 'icu_use_data_file_flag%': 0, + 'conditions': [ ['OS == "win"', { 'os_posix': 0, @@ -44,7 +47,7 @@ 'V8_BASE': '<(PRODUCT_DIR)/libv8_base.a', }, { 'OBJ_DIR': '<(PRODUCT_DIR)/obj.target', - 'V8_BASE': '<(PRODUCT_DIR)/obj.target/deps/v8/tools/gyp/libv8_base.a', + 'V8_BASE': '<(PRODUCT_DIR)/obj.target/deps/v8/src/libv8_base.a', }], ['openssl_fips != ""', { 'OPENSSL_PRODUCT': 'libcrypto.a', @@ -382,6 +385,7 @@ 'xcode_settings': { 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0', 'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x + 'CLANG_CXX_LIBRARY': 'libc++', }, }], ], diff --git a/configure b/configure index 42dac5ff64ce94..9b4fa94521a7f4 100755 --- a/configure +++ b/configure @@ -760,6 +760,8 @@ def configure_node(o): want_snapshots = not options.without_snapshot o['variables']['want_separate_host_toolset'] = int( cross_compiling and want_snapshots) + o['variables']['want_separate_host_toolset_mkpeephole'] = int( + cross_compiling) if target_arch == 'arm': configure_arm(o) diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 805f349a6e4300..29fd9226dee7b0 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -40,15 +40,13 @@ gcsuspects shell shell_g /_* -/build/Debug -/build/gyp -/build/ipch -/build/Release -/build/win_toolchain.json +/build +/gypfiles/win_toolchain.json /buildtools /hydrogen.cfg /obj /out +/out.gn /perf.data /perf.data.old /test/benchmarks/data @@ -59,15 +57,29 @@ shell_g /test/simdjs/data /test/test262/data /test/test262/data.tar +/test/test262/harness /testing/gmock -/testing/gtest +/testing/gtest/* +!/testing/gtest/include +/testing/gtest/include/* +!/testing/gtest/include/gtest +/testing/gtest/include/gtest/* +!/testing/gtest/include/gtest/gtest_prod.h /third_party +/third_party/android_tools +/third_party/cygwin /third_party/icu +/third_party/instrumented_libraries +/third_party/inspector_protocol +/third_party/jinga2 /third_party/llvm /third_party/llvm-build +/third_party/markupsafe +/third_party/WebKit /tools/clang 
/tools/gcmole/gcmole-tools /tools/gcmole/gcmole-tools.tar.gz +/tools/gyp /tools/jsfunfuzz/jsfunfuzz /tools/jsfunfuzz/jsfunfuzz.tar.gz /tools/luci-go/linux64/isolate @@ -78,6 +90,8 @@ shell_g /tools/swarming_client /tools/visual_studio/Debug /tools/visual_studio/Release +/test/fuzzer/wasm +/test/fuzzer/wasm_asmjs /v8.log.ll /xcodebuild TAGS @@ -86,7 +100,13 @@ GTAGS GRTAGS GSYMS GPATH +tags gtags.files turbo*.cfg turbo*.dot turbo*.json +v8.ignition_dispatches_table.json +/test/fuzzer/wasm.tar.gz +/test/fuzzer/wasm_asmjs.tar.gz +/src/inspector/build/closure-compiler.tar.gz +/src/inspector/build/closure-compiler \ No newline at end of file diff --git a/deps/v8/.gn b/deps/v8/.gn new file mode 100644 index 00000000000000..aee1752d4be65e --- /dev/null +++ b/deps/v8/.gn @@ -0,0 +1,48 @@ +# This file is used by the GN meta build system to find the root of the source +# tree and to set startup options. For documentation on the values set in this +# file, run "gn help dotfile" at the command line. + +# The location of the build configuration file. +buildconfig = "//build/config/BUILDCONFIG.gn" + +# The secondary source root is a parallel directory tree where +# GN build files are placed when they can not be placed directly +# in the source tree, e.g. for third party source trees. +secondary_source = "//build/secondary/" + +# These are the targets to check headers for by default. The files in targets +# matching these patterns (see "gn help label_pattern" for format) will have +# their includes checked for proper dependencies when you run either +# "gn check" or "gn gen --check". +check_targets = [] + +# These are the list of GN files that run exec_script. This whitelist exists +# to force additional review for new uses of exec_script, which is strongly +# discouraged except for gypi_to_gn calls. 
+exec_script_whitelist = [ + "//build/config/android/BUILD.gn", + "//build/config/android/config.gni", + "//build/config/android/internal_rules.gni", + "//build/config/android/rules.gni", + "//build/config/BUILD.gn", + "//build/config/compiler/BUILD.gn", + "//build/config/gcc/gcc_version.gni", + "//build/config/ios/ios_sdk.gni", + "//build/config/linux/atk/BUILD.gn", + "//build/config/linux/BUILD.gn", + "//build/config/linux/pkg_config.gni", + "//build/config/mac/mac_sdk.gni", + "//build/config/posix/BUILD.gn", + "//build/config/sysroot.gni", + "//build/config/win/BUILD.gn", + "//build/config/win/visual_studio_version.gni", + "//build/gn_helpers.py", + "//build/gypi_to_gn.py", + "//build/toolchain/concurrent_links.gni", + "//build/toolchain/gcc_toolchain.gni", + "//build/toolchain/mac/BUILD.gn", + "//build/toolchain/win/BUILD.gn", + "//build/util/branding.gni", + "//build/util/version.gni", + "//test/test262/BUILD.gn", +] diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 60b6c51db5eb1a..02562eff766b15 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -40,6 +40,7 @@ Alexis Campailla Andreas Anyuru Andrew Paprocki Andrei Kashcha +Anna Henningsen Bangfu Tao Ben Noordhuis Benjamin Tan @@ -50,7 +51,9 @@ Craig Schlenter Chris Nardi Christopher A. 
Taylor Daniel Andersson +Daniel Bevenius Daniel James +Deon Dior Douglas Crosher Dusan Milosavljevic Erich Ocean @@ -60,8 +63,10 @@ Felix Geisendörfer Filipe David Manana Franziska Hinkelmann Geoffrey Garside +Gwang Yoon Hwang Han Choongwoo Hirofumi Mako +Honggyu Kim Ioseb Dzmanashvili Isiah Meadows Jan de Mooij @@ -85,13 +90,17 @@ Matthew Sporleder Maxim Mossienko Michael Lutz Michael Smith +Michaël Zasso Mike Gilbert Mike Pennisi Milton Chiang Myeong-bo Shim Nicolas Antonius Ernst Leopold Maria Kaiser +Noj Vek +Oleksandr Chekhovskyi Paolo Giarrusso Patrick Gansterer +Peter Rybin Peter Varga Paul Lind Rafal Krypa @@ -113,4 +122,4 @@ Vladimir Shutoff Yu Yin Zac Hansen Zhongping Wang -柳荣一 \ No newline at end of file +柳荣一 diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 28aca24afc81c4..b54d72b077a0eb 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -4,6 +4,7 @@ import("//build/config/android/config.gni") import("//build/config/arm.gni") +import("//build/config/dcheck_always_on.gni") import("//build/config/mips.gni") import("//build/config/sanitizers/sanitizers.gni") @@ -11,16 +12,58 @@ if (is_android) { import("//build/config/android/rules.gni") } -# Because standalone V8 builds are not supported, assume this is part of a -# Chromium build. +import("gni/v8.gni") +import("gni/isolate.gni") import("//build_overrides/v8.gni") import("snapshot_toolchain.gni") declare_args() { - # Enable the snapshot feature, for fast context creation. - # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html - v8_use_snapshot = true + # Print to stdout on Android. + v8_android_log_stdout = false + + # Sets -DVERIFY_HEAP. + v8_enable_verify_heap = false + + # Enable compiler warnings when using V8_DEPRECATED apis. + v8_deprecation_warnings = false + + # Enable compiler warnings when using V8_DEPRECATE_SOON apis. + v8_imminent_deprecation_warnings = "" + + # Embeds the given script into the snapshot. + v8_embed_script = "" + + # Sets -dENABLE_DISASSEMBLER. 
+ v8_enable_disassembler = "" + + # Sets -dENABLE_GDB_JIT_INTERFACE. + v8_enable_gdbjit = "" + + # Sets -dENABLE_HANDLE_ZAPPING. + v8_enable_handle_zapping = true + + # Enable ECMAScript Internationalization API. Enabling this feature will + # add a dependency on the ICU library. + v8_enable_i18n_support = true + + # Enable slow dchecks. + v8_enable_slow_dchecks = false + + # Interpreted regexp engine exists as platform-independent alternative + # based where the regular expression is compiled to a bytecode. + v8_interpreted_regexp = false + + # Sets -dOBJECT_PRINT. + v8_object_print = "" + + # With post mortem support enabled, metadata is embedded into libv8 that + # describes various parameters of the VM for use by debuggers. See + # tools/gen-postmortem-metadata.py for details. + v8_postmortem_support = false + + # Switches off inlining in V8. + v8_no_inline = false # Similar to vfp but on MIPS. v8_can_use_fpu_instructions = true @@ -29,38 +72,40 @@ declare_args() { v8_use_mips_abi_hardfloat = true } -# TODO(jochen): These will need to be user-settable to support standalone V8 -# builds. -v8_deprecation_warnings = false -v8_enable_disassembler = false -v8_enable_gdbjit = false -v8_enable_handle_zapping = false -v8_enable_i18n_support = true -v8_enable_verify_heap = false -v8_interpreted_regexp = false -v8_object_print = false -v8_postmortem_support = false -v8_random_seed = "314159265" -v8_toolset_for_d8 = "host" - -if (is_msan) { - # Running the V8-generated code on an ARM simulator is a powerful hack that - # allows the tool to see the memory accesses from JITted code. Without this - # flag, JS code causes false positive reports from MSan. - v8_target_arch = "arm64" -} else { - v8_target_arch = target_cpu +# Set project-specific defaults for some args if not provided in args.gn. The +# defaults can be set in the respective build_overrides files. 
+if (v8_imminent_deprecation_warnings == "") { + if (defined(v8_imminent_deprecation_warnings_default)) { + v8_imminent_deprecation_warnings = v8_imminent_deprecation_warnings_default + } else { + v8_imminent_deprecation_warnings = false + } +} +if (v8_enable_gdbjit == "") { + if (defined(v8_enable_gdbjit_default)) { + v8_enable_gdbjit = v8_enable_gdbjit_default + } else { + v8_enable_gdbjit = false + } } -if (v8_use_snapshot && v8_use_external_startup_data) { - snapshot_target = ":v8_external_snapshot" -} else if (v8_use_snapshot) { - snapshot_target = ":v8_snapshot" -} else { - assert(!v8_use_external_startup_data) - snapshot_target = ":v8_nosnapshot" +# Derived defaults. +if (v8_object_print == "") { + v8_object_print = is_debug && !v8_optimized_debug +} +if (v8_enable_disassembler == "") { + v8_enable_disassembler = is_debug && !v8_optimized_debug } +# Specifies if the target build is a simulator build. Comparing target cpu +# with v8 target cpu to not affect simulator builds for making cross-compile +# snapshots. +is_target_simulator = target_cpu != v8_target_cpu + +v8_generated_peephole_source = "$target_gen_dir/bytecode-peephole-table.cc" +v8_random_seed = "314159265" +v8_toolset_for_shell = "host" + ############################################################################### # Configurations # @@ -70,10 +115,7 @@ config("internal_config") { include_dirs = [ "." ] if (is_component_build) { - defines = [ - "V8_SHARED", - "BUILDING_V8_SHARED", - ] + defines = [ "BUILDING_V8_SHARED" ] } } @@ -88,16 +130,25 @@ config("libplatform_config") { include_dirs = [ "include" ] } +# This config should be applied to code using the libsampler. +config("libsampler_config") { + include_dirs = [ "include" ] +} + # This config should only be applied to code using V8 and not any V8 code # itself. 
config("external_config") { if (is_component_build) { - defines = [ - "V8_SHARED", - "USING_V8_SHARED", - ] + defines = [ "USING_V8_SHARED" ] } include_dirs = [ "include" ] + if (v8_enable_inspector_override) { + include_dirs += [ "$target_gen_dir/include" ] + } + libs = [] + if (is_android && current_toolchain != host_toolchain) { + libs += [ "log" ] + } } # This config should only be applied to code that needs to be explicitly @@ -113,31 +164,34 @@ config("features") { defines = [] - if (v8_enable_disassembler == true) { + if (v8_enable_disassembler) { defines += [ "ENABLE_DISASSEMBLER" ] } - if (v8_enable_gdbjit == true) { + if (v8_enable_gdbjit) { defines += [ "ENABLE_GDB_JIT_INTERFACE" ] } - if (v8_object_print == true) { + if (v8_object_print) { defines += [ "OBJECT_PRINT" ] } - if (v8_enable_verify_heap == true) { + if (v8_enable_verify_heap) { defines += [ "VERIFY_HEAP" ] } - if (v8_interpreted_regexp == true) { + if (v8_interpreted_regexp) { defines += [ "V8_INTERPRETED_REGEXP" ] } - if (v8_deprecation_warnings == true) { + if (v8_deprecation_warnings) { defines += [ "V8_DEPRECATION_WARNINGS" ] } - if (v8_enable_i18n_support == true) { + if (v8_imminent_deprecation_warnings) { + defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ] + } + if (v8_enable_i18n_support) { defines += [ "V8_I18N_SUPPORT" ] } - if (v8_enable_handle_zapping == true) { + if (v8_enable_handle_zapping) { defines += [ "ENABLE_HANDLE_ZAPPING" ] } - if (v8_use_external_startup_data == true) { + if (v8_use_external_startup_data) { defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] } } @@ -147,10 +201,11 @@ config("toolchain") { defines = [] cflags = [] + ldflags = [] - if (v8_target_arch == "arm") { + if (v8_current_cpu == "arm") { defines += [ "V8_TARGET_ARCH_ARM" ] - if (arm_version == 7) { + if (arm_version >= 7) { defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ] } if (arm_fpu == "vfpv3-d16") { @@ -167,6 +222,7 @@ config("toolchain") { "CAN_USE_NEON", ] } + # TODO(jochen): Add support for 
arm_test_noprobe. if (current_cpu != "arm") { @@ -178,11 +234,18 @@ config("toolchain") { } } } - if (v8_target_arch == "arm64") { + if (v8_current_cpu == "arm64") { defines += [ "V8_TARGET_ARCH_ARM64" ] } + + # Mips64el/mipsel simulators. + if (is_target_simulator && + (v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) { + defines += [ "_MIPS_TARGET_SIMULATOR" ] + } + # TODO(jochen): Add support for mips. - if (v8_target_arch == "mipsel") { + if (v8_current_cpu == "mipsel") { defines += [ "V8_TARGET_ARCH_MIPS" ] if (v8_can_use_fpu_instructions) { defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] @@ -212,14 +275,17 @@ config("toolchain") { } else if (mips_arch_variant == "r1") { defines += [ "FPU_MODE_FP32" ] } + # TODO(jochen): Add support for mips_arch_variant rx and loongson. } + # TODO(jochen): Add support for mips64. - if (v8_target_arch == "mips64el") { + if (v8_current_cpu == "mips64el") { defines += [ "V8_TARGET_ARCH_MIPS64" ] if (v8_can_use_fpu_instructions) { defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] } + # TODO(jochen): Add support for big endian host byteorder. defines += [ "V8_TARGET_ARCH_MIPS64_LE" ] if (v8_use_mips_abi_hardfloat) { @@ -236,30 +302,43 @@ config("toolchain") { defines += [ "_MIPS_ARCH_MIPS64R2" ] } } - if (v8_target_arch == "s390") { + if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { defines += [ "V8_TARGET_ARCH_S390" ] - } - if (v8_target_arch == "s390x") { - defines += [ - "V8_TARGET_ARCH_S390", - "V8_TARGET_ARCH_S390X", - ] + if (v8_current_cpu == "s390x") { + defines += [ "V8_TARGET_ARCH_S390X" ] + } + if (host_cpu == "x64" || host_cpu == "x86") { + defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ] + } } - if (v8_target_arch == "x86") { + if (v8_current_cpu == "x86") { defines += [ "V8_TARGET_ARCH_IA32" ] + if (is_win) { + # Ensure no surprising artifacts from 80bit double math with x86. 
+ cflags += [ "/arch:SSE2" ] + } } - if (v8_target_arch == "x64") { + if (v8_current_cpu == "x64") { defines += [ "V8_TARGET_ARCH_X64" ] + if (is_win) { + # Increase the initial stack size. The default is 1MB, this is 2MB. This + # applies only to executables and shared libraries produced by V8 since + # ldflags are not pushed to dependants. + ldflags += [ "/STACK:2097152" ] + } } - - if (is_win) { - defines += [ "WIN32" ] - # TODO(jochen): Support v8_enable_prof. + if (is_android && v8_android_log_stdout) { + defines += [ "V8_ANDROID_LOG_STDOUT" ] } + # TODO(jochen): Support v8_enable_prof on Windows. # TODO(jochen): Add support for compiling with simulators. if (is_debug) { + if (is_linux && v8_enable_backtrace) { + ldflags += [ "-rdynamic" ] + } + # TODO(jochen): Add support for different debug optimization levels. defines += [ "ENABLE_DISASSEMBLER", @@ -267,7 +346,19 @@ config("toolchain") { "OBJECT_PRINT", "VERIFY_HEAP", "DEBUG", - "OPTIMIZED_DEBUG", + "TRACE_MAPS", + ] + if (v8_enable_slow_dchecks) { + defines += [ "ENABLE_SLOW_DCHECKS" ] + } + } else if (dcheck_always_on) { + defines += [ "DEBUG" ] + } + + if (v8_no_inline) { + cflags += [ + "-fno-inline-functions", + "-fno-inline", ] } } @@ -297,26 +388,20 @@ action("js2c") { "src/js/symbol.js", "src/js/array.js", "src/js/string.js", - "src/js/uri.js", "src/js/math.js", - "src/third_party/fdlibm/fdlibm.js", "src/js/regexp.js", "src/js/arraybuffer.js", "src/js/typedarray.js", - "src/js/iterator-prototype.js", - "src/js/generator.js", - "src/js/object-observe.js", "src/js/collection.js", "src/js/weak-collection.js", "src/js/collection-iterator.js", "src/js/promise.js", "src/js/messages.js", - "src/js/json.js", "src/js/array-iterator.js", - "src/js/string-iterator.js", "src/js/templates.js", "src/js/spread.js", "src/js/proxy.js", + "src/js/async-await.js", "src/debug/mirrors.js", "src/debug/debug.js", "src/debug/liveedit.js", @@ -359,22 +444,22 @@ action("js2c_experimental") { sources = [ "src/js/macros.py", 
"src/messages.h", - "src/js/generator.js", "src/js/harmony-atomics.js", - "src/js/harmony-regexp-exec.js", - "src/js/harmony-object-observe.js", - "src/js/harmony-sharedarraybuffer.js", "src/js/harmony-simd.js", - "src/js/harmony-species.js", - "src/js/harmony-unicode-regexps.js", "src/js/harmony-string-padding.js", - "src/js/promise-extra.js", ] outputs = [ "$target_gen_dir/experimental-libraries.cc", ] + if (v8_enable_i18n_support) { + sources += [ + "src/js/datetime-format-to-parts.js", + "src/js/icu-case-mapping.js", + ] + } + args = [ rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir), @@ -473,14 +558,22 @@ action("d8_js2c") { rebase_path(inputs, root_build_dir) } -if (is_android) { +if (is_android && enable_java_templates) { android_assets("v8_external_startup_data_assets") { if (v8_use_external_startup_data) { deps = [ "//v8", ] - renaming_sources = v8_external_startup_data_renaming_sources - renaming_destinations = v8_external_startup_data_renaming_destinations + sources = [ + "$root_out_dir/natives_blob.bin", + ] + renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] + if (current_cpu == "arm" || current_cpu == "x86" || + current_cpu == "mipsel") { + renaming_destinations = [ "snapshot_blob_32.bin" ] + } else { + renaming_destinations = [ "snapshot_blob_64.bin" ] + } disable_compression = true } } @@ -543,17 +636,19 @@ action("run_mksnapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. 
deps = [ - ":mksnapshot($snapshot_toolchain)", + ":mksnapshot($v8_snapshot_toolchain)", ] script = "tools/run.py" + sources = [] + outputs = [ "$target_gen_dir/snapshot.cc", ] args = [ - "./" + rebase_path(get_label_info(":mksnapshot($snapshot_toolchain)", + "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)", "root_out_dir") + "/mksnapshot", root_build_dir), "--startup_src", @@ -574,13 +669,79 @@ action("run_mksnapshot") { rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir), ] } + + if (v8_embed_script != "") { + sources += [ v8_embed_script ] + args += [ rebase_path(v8_embed_script, root_build_dir) ] + } +} + +action("run_mkpeephole") { + visibility = [ ":*" ] # Only targets in this file can depend on this. + + deps = [ + ":mkpeephole($v8_snapshot_toolchain)", + ] + + outputs = [ + v8_generated_peephole_source, + ] + + sources = [] + + script = "tools/run.py" + + args = [ + "./" + rebase_path(get_label_info(":mkpeephole($v8_snapshot_toolchain)", + "root_out_dir") + "/mkpeephole", + root_build_dir), + rebase_path(v8_generated_peephole_source, root_build_dir), + ] +} + +action("v8_dump_build_config") { + script = "tools/testrunner/utils/dump_build_config.py" + outputs = [ + "$root_out_dir/v8_build_config.json", + ] + args = [ + rebase_path("$root_out_dir/v8_build_config.json", root_build_dir), + "dcheck_always_on=$dcheck_always_on", + "is_asan=$is_asan", + "is_cfi=$is_cfi", + "is_component_build=$is_component_build", + "is_debug=$is_debug", + "is_msan=$is_msan", + "is_tsan=$is_tsan", + "target_cpu=\"$target_cpu\"", + "v8_enable_i18n_support=$v8_enable_i18n_support", + "v8_target_cpu=\"$v8_target_cpu\"", + "v8_use_snapshot=$v8_use_snapshot", + ] } ############################################################################### # Source Sets (aka static libraries) # -source_set("v8_nosnapshot") { +source_set("v8_maybe_snapshot") { + if (v8_use_snapshot && v8_use_external_startup_data) { + public_deps = [ + ":v8_external_snapshot", + ] + 
} else if (v8_use_snapshot) { + public_deps = [ + ":v8_snapshot", + ] + } else { + # Ignore v8_use_external_startup_data setting if no snapshot is used. + public_deps = [ + ":v8_nosnapshot", + ] + } +} + +v8_source_set("v8_nosnapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. deps = [ @@ -599,16 +760,10 @@ source_set("v8_nosnapshot") { "src/snapshot/snapshot-empty.cc", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":features", - ":toolchain", - ] + configs = [ ":internal_config" ] } -source_set("v8_snapshot") { +v8_source_set("v8_snapshot") { # Only targets in this file and the top-level visibility target can # depend on this. visibility = [ @@ -637,17 +792,11 @@ source_set("v8_snapshot") { "$target_gen_dir/snapshot.cc", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":features", - ":toolchain", - ] + configs = [ ":internal_config" ] } if (v8_use_external_startup_data) { - source_set("v8_external_snapshot") { + v8_source_set("v8_external_snapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. deps = [ @@ -667,22 +816,17 @@ if (v8_use_external_startup_data) { "src/snapshot/snapshot-external.cc", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":features", - ":toolchain", - ] + configs = [ ":internal_config" ] } } -source_set("v8_base") { +v8_source_set("v8_base") { visibility = [ ":*" ] # Only targets in this file can depend on this. sources = [ - # TODO(fmeawad): This needs to be updated to support standalone V8 builds. 
- "../base/trace_event/common/trace_event_common.h", + "//base/trace_event/common/trace_event_common.h", + + ### gcmole(all) ### "include/v8-debug.h", "include/v8-experimental.h", "include/v8-platform.h", @@ -700,6 +844,7 @@ source_set("v8_base") { "src/allocation-site-scopes.h", "src/allocation.cc", "src/allocation.h", + "src/api-arguments-inl.h", "src/api-arguments.cc", "src/api-arguments.h", "src/api-experimental.cc", @@ -710,33 +855,45 @@ source_set("v8_base") { "src/api.h", "src/arguments.cc", "src/arguments.h", + "src/asmjs/asm-js.cc", + "src/asmjs/asm-js.h", + "src/asmjs/asm-typer.cc", + "src/asmjs/asm-typer.h", + "src/asmjs/asm-types.cc", + "src/asmjs/asm-types.h", + "src/asmjs/asm-wasm-builder.cc", + "src/asmjs/asm-wasm-builder.h", "src/assembler.cc", "src/assembler.h", "src/assert-scope.cc", "src/assert-scope.h", "src/ast/ast-expression-rewriter.cc", "src/ast/ast-expression-rewriter.h", - "src/ast/ast-expression-visitor.cc", - "src/ast/ast-expression-visitor.h", "src/ast/ast-literal-reindexer.cc", "src/ast/ast-literal-reindexer.h", "src/ast/ast-numbering.cc", "src/ast/ast-numbering.h", + "src/ast/ast-traversal-visitor.h", + "src/ast/ast-type-bounds.h", + "src/ast/ast-types.cc", + "src/ast/ast-types.h", "src/ast/ast-value-factory.cc", "src/ast/ast-value-factory.h", "src/ast/ast.cc", "src/ast/ast.h", + "src/ast/compile-time-value.cc", + "src/ast/compile-time-value.h", + "src/ast/context-slot-cache.cc", + "src/ast/context-slot-cache.h", "src/ast/modules.cc", "src/ast/modules.h", "src/ast/prettyprinter.cc", "src/ast/prettyprinter.h", "src/ast/scopeinfo.cc", - "src/ast/scopeinfo.h", "src/ast/scopes.cc", "src/ast/scopes.h", "src/ast/variables.cc", "src/ast/variables.h", - "src/atomic-utils.h", "src/background-parsing-task.cc", "src/background-parsing-task.h", "src/bailout-reason.cc", @@ -751,8 +908,37 @@ source_set("v8_base") { "src/bit-vector.h", "src/bootstrapper.cc", "src/bootstrapper.h", - "src/builtins.cc", - "src/builtins.h", + 
"src/builtins/builtins-api.cc", + "src/builtins/builtins-array.cc", + "src/builtins/builtins-arraybuffer.cc", + "src/builtins/builtins-boolean.cc", + "src/builtins/builtins-call.cc", + "src/builtins/builtins-callsite.cc", + "src/builtins/builtins-conversion.cc", + "src/builtins/builtins-dataview.cc", + "src/builtins/builtins-date.cc", + "src/builtins/builtins-debug.cc", + "src/builtins/builtins-error.cc", + "src/builtins/builtins-function.cc", + "src/builtins/builtins-generator.cc", + "src/builtins/builtins-global.cc", + "src/builtins/builtins-handler.cc", + "src/builtins/builtins-internal.cc", + "src/builtins/builtins-interpreter.cc", + "src/builtins/builtins-iterator.cc", + "src/builtins/builtins-json.cc", + "src/builtins/builtins-math.cc", + "src/builtins/builtins-number.cc", + "src/builtins/builtins-object.cc", + "src/builtins/builtins-proxy.cc", + "src/builtins/builtins-reflect.cc", + "src/builtins/builtins-sharedarraybuffer.cc", + "src/builtins/builtins-string.cc", + "src/builtins/builtins-symbol.cc", + "src/builtins/builtins-typedarray.cc", + "src/builtins/builtins-utils.h", + "src/builtins/builtins.cc", + "src/builtins/builtins.h", "src/cached-powers.cc", "src/cached-powers.h", "src/cancelable-task.cc", @@ -761,8 +947,11 @@ source_set("v8_base") { "src/char-predicates.cc", "src/char-predicates.h", "src/checks.h", + "src/code-events.h", "src/code-factory.cc", "src/code-factory.h", + "src/code-stub-assembler.cc", + "src/code-stub-assembler.h", "src/code-stubs-hydrogen.cc", "src/code-stubs.cc", "src/code-stubs.h", @@ -773,8 +962,14 @@ source_set("v8_base") { "src/compilation-cache.h", "src/compilation-dependencies.cc", "src/compilation-dependencies.h", + "src/compilation-info.cc", + "src/compilation-info.h", "src/compilation-statistics.cc", "src/compilation-statistics.h", + "src/compiler-dispatcher/compiler-dispatcher-job.cc", + "src/compiler-dispatcher/compiler-dispatcher-job.h", + "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", + 
"src/compiler-dispatcher/optimizing-compile-dispatcher.h", "src/compiler.cc", "src/compiler.h", "src/compiler/access-builder.cc", @@ -795,16 +990,16 @@ source_set("v8_base") { "src/compiler/bytecode-branch-analysis.h", "src/compiler/bytecode-graph-builder.cc", "src/compiler/bytecode-graph-builder.h", + "src/compiler/bytecode-loop-analysis.cc", + "src/compiler/bytecode-loop-analysis.h", "src/compiler/c-linkage.cc", - "src/compiler/change-lowering.cc", - "src/compiler/change-lowering.h", - "src/compiler/coalesced-live-ranges.cc", - "src/compiler/coalesced-live-ranges.h", + "src/compiler/checkpoint-elimination.cc", + "src/compiler/checkpoint-elimination.h", + "src/compiler/code-assembler.cc", + "src/compiler/code-assembler.h", "src/compiler/code-generator-impl.h", "src/compiler/code-generator.cc", "src/compiler/code-generator.h", - "src/compiler/code-stub-assembler.cc", - "src/compiler/code-stub-assembler.h", "src/compiler/common-node-cache.cc", "src/compiler/common-node-cache.h", "src/compiler/common-operator-reducer.cc", @@ -820,6 +1015,8 @@ source_set("v8_base") { "src/compiler/dead-code-elimination.cc", "src/compiler/dead-code-elimination.h", "src/compiler/diamond.h", + "src/compiler/effect-control-linearizer.cc", + "src/compiler/effect-control-linearizer.h", "src/compiler/escape-analysis-reducer.cc", "src/compiler/escape-analysis-reducer.h", "src/compiler/escape-analysis.cc", @@ -842,8 +1039,6 @@ source_set("v8_base") { "src/compiler/graph-visualizer.h", "src/compiler/graph.cc", "src/compiler/graph.h", - "src/compiler/greedy-allocator.cc", - "src/compiler/greedy-allocator.h", "src/compiler/instruction-codes.h", "src/compiler/instruction-scheduler.cc", "src/compiler/instruction-scheduler.h", @@ -895,10 +1090,15 @@ source_set("v8_base") { "src/compiler/loop-analysis.cc", "src/compiler/loop-analysis.h", "src/compiler/loop-peeling.cc", + "src/compiler/loop-peeling.h", + "src/compiler/loop-variable-optimizer.cc", + "src/compiler/loop-variable-optimizer.h", 
"src/compiler/machine-operator-reducer.cc", "src/compiler/machine-operator-reducer.h", "src/compiler/machine-operator.cc", "src/compiler/machine-operator.h", + "src/compiler/memory-optimizer.cc", + "src/compiler/memory-optimizer.h", "src/compiler/move-optimizer.cc", "src/compiler/move-optimizer.h", "src/compiler/node-aux-data.h", @@ -914,6 +1114,8 @@ source_set("v8_base") { "src/compiler/node.h", "src/compiler/opcodes.cc", "src/compiler/opcodes.h", + "src/compiler/operation-typer.cc", + "src/compiler/operation-typer.h", "src/compiler/operator-properties.cc", "src/compiler/operator-properties.h", "src/compiler/operator.cc", @@ -926,6 +1128,8 @@ source_set("v8_base") { "src/compiler/pipeline.h", "src/compiler/raw-machine-assembler.cc", "src/compiler/raw-machine-assembler.h", + "src/compiler/redundancy-elimination.cc", + "src/compiler/redundancy-elimination.h", "src/compiler/register-allocator-verifier.cc", "src/compiler/register-allocator-verifier.h", "src/compiler/register-allocator.cc", @@ -948,14 +1152,21 @@ source_set("v8_base") { "src/compiler/source-position.h", "src/compiler/state-values-utils.cc", "src/compiler/state-values-utils.h", + "src/compiler/store-store-elimination.cc", + "src/compiler/store-store-elimination.h", "src/compiler/tail-call-optimization.cc", "src/compiler/tail-call-optimization.h", + "src/compiler/type-cache.cc", + "src/compiler/type-cache.h", "src/compiler/type-hint-analyzer.cc", "src/compiler/type-hint-analyzer.h", - "src/compiler/type-hints.cc", - "src/compiler/type-hints.h", + "src/compiler/typed-optimization.cc", + "src/compiler/typed-optimization.h", "src/compiler/typer.cc", "src/compiler/typer.h", + "src/compiler/types.cc", + "src/compiler/types.h", + "src/compiler/unwinding-info-writer.h", "src/compiler/value-numbering-reducer.cc", "src/compiler/value-numbering-reducer.h", "src/compiler/verifier.cc", @@ -973,6 +1184,7 @@ source_set("v8_base") { "src/conversions-inl.h", "src/conversions.cc", "src/conversions.h", + 
"src/counters-inl.h", "src/counters.cc", "src/counters.h", "src/crankshaft/compilation-phase.cc", @@ -1032,6 +1244,7 @@ source_set("v8_base") { "src/crankshaft/lithium-allocator.h", "src/crankshaft/lithium-codegen.cc", "src/crankshaft/lithium-codegen.h", + "src/crankshaft/lithium-inl.h", "src/crankshaft/lithium.cc", "src/crankshaft/lithium.h", "src/crankshaft/typing.cc", @@ -1052,6 +1265,8 @@ source_set("v8_base") { "src/debug/debug.h", "src/debug/liveedit.cc", "src/debug/liveedit.h", + "src/deoptimize-reason.cc", + "src/deoptimize-reason.h", "src/deoptimizer.cc", "src/deoptimizer.h", "src/disasm.h", @@ -1063,6 +1278,8 @@ source_set("v8_base") { "src/dtoa.cc", "src/dtoa.h", "src/effects.h", + "src/eh-frame.cc", + "src/eh-frame.h", "src/elements-kind.cc", "src/elements-kind.h", "src/elements.cc", @@ -1075,6 +1292,8 @@ source_set("v8_base") { "src/extensions/free-buffer-extension.h", "src/extensions/gc-extension.cc", "src/extensions/gc-extension.h", + "src/extensions/ignition-statistics-extension.cc", + "src/extensions/ignition-statistics-extension.h", "src/extensions/statistics-extension.cc", "src/extensions/statistics-extension.h", "src/extensions/trigger-failure-extension.cc", @@ -1111,10 +1330,12 @@ source_set("v8_base") { "src/handles-inl.h", "src/handles.cc", "src/handles.h", - "src/hashmap.h", "src/heap-symbols.h", + "src/heap/array-buffer-tracker-inl.h", "src/heap/array-buffer-tracker.cc", "src/heap/array-buffer-tracker.h", + "src/heap/code-stats.cc", + "src/heap/code-stats.h", "src/heap/gc-idle-time-handler.cc", "src/heap/gc-idle-time-handler.h", "src/heap/gc-tracer.cc", @@ -1122,6 +1343,7 @@ source_set("v8_base") { "src/heap/heap-inl.h", "src/heap/heap.cc", "src/heap/heap.h", + "src/heap/incremental-marking-inl.h", "src/heap/incremental-marking-job.cc", "src/heap/incremental-marking-job.h", "src/heap/incremental-marking.cc", @@ -1129,6 +1351,7 @@ source_set("v8_base") { "src/heap/mark-compact-inl.h", "src/heap/mark-compact.cc", "src/heap/mark-compact.h", + 
"src/heap/marking.h", "src/heap/memory-reducer.cc", "src/heap/memory-reducer.h", "src/heap/object-stats.cc", @@ -1158,6 +1381,7 @@ source_set("v8_base") { "src/ic/call-optimization.h", "src/ic/handler-compiler.cc", "src/ic/handler-compiler.h", + "src/ic/handler-configuration.h", "src/ic/ic-compiler.cc", "src/ic/ic-compiler.h", "src/ic/ic-inl.h", @@ -1177,10 +1401,31 @@ source_set("v8_base") { "src/interpreter/bytecode-array-builder.h", "src/interpreter/bytecode-array-iterator.cc", "src/interpreter/bytecode-array-iterator.h", + "src/interpreter/bytecode-array-writer.cc", + "src/interpreter/bytecode-array-writer.h", + "src/interpreter/bytecode-dead-code-optimizer.cc", + "src/interpreter/bytecode-dead-code-optimizer.h", + "src/interpreter/bytecode-decoder.cc", + "src/interpreter/bytecode-decoder.h", + "src/interpreter/bytecode-flags.cc", + "src/interpreter/bytecode-flags.h", "src/interpreter/bytecode-generator.cc", "src/interpreter/bytecode-generator.h", + "src/interpreter/bytecode-label.cc", + "src/interpreter/bytecode-label.h", + "src/interpreter/bytecode-operands.cc", + "src/interpreter/bytecode-operands.h", + "src/interpreter/bytecode-peephole-optimizer.cc", + "src/interpreter/bytecode-peephole-optimizer.h", + "src/interpreter/bytecode-peephole-table.h", + "src/interpreter/bytecode-pipeline.cc", + "src/interpreter/bytecode-pipeline.h", "src/interpreter/bytecode-register-allocator.cc", "src/interpreter/bytecode-register-allocator.h", + "src/interpreter/bytecode-register-optimizer.cc", + "src/interpreter/bytecode-register-optimizer.h", + "src/interpreter/bytecode-register.cc", + "src/interpreter/bytecode-register.h", "src/interpreter/bytecode-traits.h", "src/interpreter/bytecodes.cc", "src/interpreter/bytecodes.h", @@ -1196,12 +1441,12 @@ source_set("v8_base") { "src/interpreter/interpreter-intrinsics.h", "src/interpreter/interpreter.cc", "src/interpreter/interpreter.h", - "src/interpreter/source-position-table.cc", - "src/interpreter/source-position-table.h", 
"src/isolate-inl.h", "src/isolate.cc", "src/isolate.h", + "src/json-parser.cc", "src/json-parser.h", + "src/json-stringifier.cc", "src/json-stringifier.h", "src/keys.cc", "src/keys.h", @@ -1210,11 +1455,16 @@ source_set("v8_base") { "src/layout-descriptor.h", "src/list-inl.h", "src/list.h", + "src/locked-queue-inl.h", + "src/locked-queue.h", "src/log-inl.h", "src/log-utils.cc", "src/log-utils.h", "src/log.cc", "src/log.h", + "src/lookup-cache-inl.h", + "src/lookup-cache.cc", + "src/lookup-cache.h", "src/lookup.cc", "src/lookup.h", "src/machine-type.cc", @@ -1230,15 +1480,17 @@ source_set("v8_base") { "src/objects-printer.cc", "src/objects.cc", "src/objects.h", - "src/optimizing-compile-dispatcher.cc", - "src/optimizing-compile-dispatcher.h", "src/ostreams.cc", "src/ostreams.h", + "src/parsing/duplicate-finder.cc", + "src/parsing/duplicate-finder.h", "src/parsing/expression-classifier.h", "src/parsing/func-name-inferrer.cc", "src/parsing/func-name-inferrer.h", "src/parsing/parameter-initializer-rewriter.cc", "src/parsing/parameter-initializer-rewriter.h", + "src/parsing/parse-info.cc", + "src/parsing/parse-info.h", "src/parsing/parser-base.h", "src/parsing/parser.cc", "src/parsing/parser.h", @@ -1275,12 +1527,14 @@ source_set("v8_base") { "src/profiler/profile-generator-inl.h", "src/profiler/profile-generator.cc", "src/profiler/profile-generator.h", - "src/profiler/sampler.cc", - "src/profiler/sampler.h", + "src/profiler/profiler-listener.cc", + "src/profiler/profiler-listener.h", "src/profiler/sampling-heap-profiler.cc", "src/profiler/sampling-heap-profiler.h", "src/profiler/strings-storage.cc", "src/profiler/strings-storage.h", + "src/profiler/tick-sample.cc", + "src/profiler/tick-sample.h", "src/profiler/unbound-queue-inl.h", "src/profiler/unbound-queue.h", "src/property-descriptor.cc", @@ -1319,6 +1573,7 @@ source_set("v8_base") { "src/runtime/runtime-compiler.cc", "src/runtime/runtime-date.cc", "src/runtime/runtime-debug.cc", + "src/runtime/runtime-error.cc", 
"src/runtime/runtime-forin.cc", "src/runtime/runtime-function.cc", "src/runtime/runtime-futex.cc", @@ -1326,13 +1581,11 @@ source_set("v8_base") { "src/runtime/runtime-i18n.cc", "src/runtime/runtime-internal.cc", "src/runtime/runtime-interpreter.cc", - "src/runtime/runtime-json.cc", "src/runtime/runtime-literals.cc", "src/runtime/runtime-liveedit.cc", "src/runtime/runtime-maths.cc", "src/runtime/runtime-numbers.cc", "src/runtime/runtime-object.cc", - "src/runtime/runtime-observe.cc", "src/runtime/runtime-operators.cc", "src/runtime/runtime-proxy.cc", "src/runtime/runtime-regexp.cc", @@ -1342,8 +1595,8 @@ source_set("v8_base") { "src/runtime/runtime-symbol.cc", "src/runtime/runtime-test.cc", "src/runtime/runtime-typedarray.cc", - "src/runtime/runtime-uri.cc", "src/runtime/runtime-utils.h", + "src/runtime/runtime-wasm.cc", "src/runtime/runtime.cc", "src/runtime/runtime.h", "src/safepoint-table.cc", @@ -1369,6 +1622,8 @@ source_set("v8_base") { "src/snapshot/snapshot.h", "src/snapshot/startup-serializer.cc", "src/snapshot/startup-serializer.h", + "src/source-position-table.cc", + "src/source-position-table.h", "src/source-position.h", "src/splay-tree-inl.h", "src/splay-tree.h", @@ -1381,26 +1636,18 @@ source_set("v8_base") { "src/string-stream.h", "src/strtod.cc", "src/strtod.h", - "src/third_party/fdlibm/fdlibm.cc", - "src/third_party/fdlibm/fdlibm.h", "src/tracing/trace-event.cc", "src/tracing/trace-event.h", "src/transitions-inl.h", "src/transitions.cc", "src/transitions.h", - "src/type-cache.cc", - "src/type-cache.h", "src/type-feedback-vector-inl.h", "src/type-feedback-vector.cc", "src/type-feedback-vector.h", + "src/type-hints.cc", + "src/type-hints.h", "src/type-info.cc", "src/type-info.h", - "src/types.cc", - "src/types.h", - "src/typing-asm.cc", - "src/typing-asm.h", - "src/typing-reset.cc", - "src/typing-reset.h", "src/unicode-cache-inl.h", "src/unicode-cache.h", "src/unicode-decoder.cc", @@ -1408,6 +1655,8 @@ source_set("v8_base") { "src/unicode-inl.h", 
"src/unicode.cc", "src/unicode.h", + "src/uri.cc", + "src/uri.h", "src/utils-inl.h", "src/utils.cc", "src/utils.h", @@ -1416,20 +1665,31 @@ source_set("v8_base") { "src/v8memory.h", "src/v8threads.cc", "src/v8threads.h", + "src/value-serializer.cc", + "src/value-serializer.h", + "src/vector.h", "src/version.cc", "src/version.h", "src/vm-state-inl.h", "src/vm-state.h", - "src/wasm/asm-wasm-builder.cc", - "src/wasm/asm-wasm-builder.h", "src/wasm/ast-decoder.cc", "src/wasm/ast-decoder.h", "src/wasm/decoder.h", "src/wasm/encoder.cc", "src/wasm/encoder.h", + "src/wasm/leb-helper.h", "src/wasm/module-decoder.cc", "src/wasm/module-decoder.h", + "src/wasm/switch-logic.cc", + "src/wasm/switch-logic.h", + "src/wasm/wasm-debug.cc", + "src/wasm/wasm-debug.h", + "src/wasm/wasm-external-refs.cc", "src/wasm/wasm-external-refs.h", + "src/wasm/wasm-function-name-table.cc", + "src/wasm/wasm-function-name-table.h", + "src/wasm/wasm-interpreter.cc", + "src/wasm/wasm-interpreter.h", "src/wasm/wasm-js.cc", "src/wasm/wasm-js.h", "src/wasm/wasm-macro-gen.h", @@ -1439,14 +1699,19 @@ source_set("v8_base") { "src/wasm/wasm-opcodes.h", "src/wasm/wasm-result.cc", "src/wasm/wasm-result.h", - "src/zone-allocator.h", - "src/zone-containers.h", - "src/zone.cc", - "src/zone.h", + "src/zone/accounting-allocator.cc", + "src/zone/accounting-allocator.h", + "src/zone/zone-allocator.h", + "src/zone/zone-allocator.h", + "src/zone/zone-containers.h", + "src/zone/zone-segment.h", + "src/zone/zone.cc", + "src/zone/zone.h", ] - if (v8_target_arch == "x86") { - sources += [ + if (v8_current_cpu == "x86") { + sources += [ ### gcmole(arch:ia32) ### + "src/builtins/ia32/builtins-ia32.cc", "src/compiler/ia32/code-generator-ia32.cc", "src/compiler/ia32/instruction-codes-ia32.h", "src/compiler/ia32/instruction-scheduler-ia32.cc", @@ -1462,7 +1727,6 @@ source_set("v8_base") { "src/ia32/assembler-ia32-inl.h", "src/ia32/assembler-ia32.cc", "src/ia32/assembler-ia32.h", - "src/ia32/builtins-ia32.cc", 
"src/ia32/code-stubs-ia32.cc", "src/ia32/code-stubs-ia32.h", "src/ia32/codegen-ia32.cc", @@ -1475,6 +1739,8 @@ source_set("v8_base") { "src/ia32/interface-descriptors-ia32.cc", "src/ia32/macro-assembler-ia32.cc", "src/ia32/macro-assembler-ia32.h", + "src/ia32/simulator-ia32.cc", + "src/ia32/simulator-ia32.h", "src/ic/ia32/access-compiler-ia32.cc", "src/ic/ia32/handler-compiler-ia32.cc", "src/ic/ia32/ic-compiler-ia32.cc", @@ -1483,12 +1749,15 @@ source_set("v8_base") { "src/regexp/ia32/regexp-macro-assembler-ia32.cc", "src/regexp/ia32/regexp-macro-assembler-ia32.h", ] - } else if (v8_target_arch == "x64") { - sources += [ + } else if (v8_current_cpu == "x64") { + sources += [ ### gcmole(arch:x64) ### + "src/builtins/x64/builtins-x64.cc", "src/compiler/x64/code-generator-x64.cc", "src/compiler/x64/instruction-codes-x64.h", "src/compiler/x64/instruction-scheduler-x64.cc", "src/compiler/x64/instruction-selector-x64.cc", + "src/compiler/x64/unwinding-info-writer-x64.cc", + "src/compiler/x64/unwinding-info-writer-x64.h", "src/crankshaft/x64/lithium-codegen-x64.cc", "src/crankshaft/x64/lithium-codegen-x64.h", "src/crankshaft/x64/lithium-gap-resolver-x64.cc", @@ -1504,10 +1773,10 @@ source_set("v8_base") { "src/ic/x64/stub-cache-x64.cc", "src/regexp/x64/regexp-macro-assembler-x64.cc", "src/regexp/x64/regexp-macro-assembler-x64.h", + "src/third_party/valgrind/valgrind.h", "src/x64/assembler-x64-inl.h", "src/x64/assembler-x64.cc", "src/x64/assembler-x64.h", - "src/x64/builtins-x64.cc", "src/x64/code-stubs-x64.cc", "src/x64/code-stubs-x64.h", "src/x64/codegen-x64.cc", @@ -1515,18 +1784,21 @@ source_set("v8_base") { "src/x64/cpu-x64.cc", "src/x64/deoptimizer-x64.cc", "src/x64/disasm-x64.cc", + "src/x64/eh-frame-x64.cc", "src/x64/frames-x64.cc", "src/x64/frames-x64.h", "src/x64/interface-descriptors-x64.cc", "src/x64/macro-assembler-x64.cc", "src/x64/macro-assembler-x64.h", + "src/x64/simulator-x64.cc", + "src/x64/simulator-x64.h", + "src/x64/sse-instr.h", ] - } else if 
(v8_target_arch == "arm") { - sources += [ + } else if (v8_current_cpu == "arm") { + sources += [ ### gcmole(arch:arm) ### "src/arm/assembler-arm-inl.h", "src/arm/assembler-arm.cc", "src/arm/assembler-arm.h", - "src/arm/builtins-arm.cc", "src/arm/code-stubs-arm.cc", "src/arm/code-stubs-arm.h", "src/arm/codegen-arm.cc", @@ -1536,6 +1808,7 @@ source_set("v8_base") { "src/arm/cpu-arm.cc", "src/arm/deoptimizer-arm.cc", "src/arm/disasm-arm.cc", + "src/arm/eh-frame-arm.cc", "src/arm/frames-arm.cc", "src/arm/frames-arm.h", "src/arm/interface-descriptors-arm.cc", @@ -1544,10 +1817,13 @@ source_set("v8_base") { "src/arm/macro-assembler-arm.h", "src/arm/simulator-arm.cc", "src/arm/simulator-arm.h", + "src/builtins/arm/builtins-arm.cc", "src/compiler/arm/code-generator-arm.cc", "src/compiler/arm/instruction-codes-arm.h", "src/compiler/arm/instruction-scheduler-arm.cc", "src/compiler/arm/instruction-selector-arm.cc", + "src/compiler/arm/unwinding-info-writer-arm.cc", + "src/compiler/arm/unwinding-info-writer-arm.h", "src/crankshaft/arm/lithium-arm.cc", "src/crankshaft/arm/lithium-arm.h", "src/crankshaft/arm/lithium-codegen-arm.cc", @@ -1564,12 +1840,11 @@ source_set("v8_base") { "src/regexp/arm/regexp-macro-assembler-arm.cc", "src/regexp/arm/regexp-macro-assembler-arm.h", ] - } else if (v8_target_arch == "arm64") { - sources += [ + } else if (v8_current_cpu == "arm64") { + sources += [ ### gcmole(arch:arm64) ### "src/arm64/assembler-arm64-inl.h", "src/arm64/assembler-arm64.cc", "src/arm64/assembler-arm64.h", - "src/arm64/builtins-arm64.cc", "src/arm64/code-stubs-arm64.cc", "src/arm64/code-stubs-arm64.h", "src/arm64/codegen-arm64.cc", @@ -1582,6 +1857,7 @@ source_set("v8_base") { "src/arm64/deoptimizer-arm64.cc", "src/arm64/disasm-arm64.cc", "src/arm64/disasm-arm64.h", + "src/arm64/eh-frame-arm64.cc", "src/arm64/frames-arm64.cc", "src/arm64/frames-arm64.h", "src/arm64/instructions-arm64.cc", @@ -1597,10 +1873,13 @@ source_set("v8_base") { "src/arm64/simulator-arm64.h", 
"src/arm64/utils-arm64.cc", "src/arm64/utils-arm64.h", + "src/builtins/arm64/builtins-arm64.cc", "src/compiler/arm64/code-generator-arm64.cc", "src/compiler/arm64/instruction-codes-arm64.h", "src/compiler/arm64/instruction-scheduler-arm64.cc", "src/compiler/arm64/instruction-selector-arm64.cc", + "src/compiler/arm64/unwinding-info-writer-arm64.cc", + "src/compiler/arm64/unwinding-info-writer-arm64.h", "src/crankshaft/arm64/delayed-masm-arm64-inl.h", "src/crankshaft/arm64/delayed-masm-arm64.cc", "src/crankshaft/arm64/delayed-masm-arm64.h", @@ -1620,8 +1899,9 @@ source_set("v8_base") { "src/regexp/arm64/regexp-macro-assembler-arm64.cc", "src/regexp/arm64/regexp-macro-assembler-arm64.h", ] - } else if (v8_target_arch == "mipsel") { - sources += [ + } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { + sources += [ ### gcmole(arch:mipsel) ### + "src/builtins/mips/builtins-mips.cc", "src/compiler/mips/code-generator-mips.cc", "src/compiler/mips/instruction-codes-mips.h", "src/compiler/mips/instruction-scheduler-mips.cc", @@ -1642,7 +1922,6 @@ source_set("v8_base") { "src/mips/assembler-mips-inl.h", "src/mips/assembler-mips.cc", "src/mips/assembler-mips.h", - "src/mips/builtins-mips.cc", "src/mips/code-stubs-mips.cc", "src/mips/code-stubs-mips.h", "src/mips/codegen-mips.cc", @@ -1662,12 +1941,13 @@ source_set("v8_base") { "src/regexp/mips/regexp-macro-assembler-mips.cc", "src/regexp/mips/regexp-macro-assembler-mips.h", ] - } else if (v8_target_arch == "mips64el") { - sources += [ - "compiler/mips64/code-generator-mips64.cc", - "compiler/mips64/instruction-codes-mips64.h", - "compiler/mips64/instruction-scheduler-mips64.cc", - "compiler/mips64/instruction-selector-mips64.cc", + } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { + sources += [ ### gcmole(arch:mips64el) ### + "src/builtins/mips64/builtins-mips64.cc", + "src/compiler/mips64/code-generator-mips64.cc", + "src/compiler/mips64/instruction-codes-mips64.h", + 
"src/compiler/mips64/instruction-scheduler-mips64.cc", + "src/compiler/mips64/instruction-selector-mips64.cc", "src/crankshaft/mips64/lithium-codegen-mips64.cc", "src/crankshaft/mips64/lithium-codegen-mips64.h", "src/crankshaft/mips64/lithium-gap-resolver-mips64.cc", @@ -1684,7 +1964,6 @@ source_set("v8_base") { "src/mips64/assembler-mips64-inl.h", "src/mips64/assembler-mips64.cc", "src/mips64/assembler-mips64.h", - "src/mips64/builtins-mips64.cc", "src/mips64/code-stubs-mips64.cc", "src/mips64/code-stubs-mips64.h", "src/mips64/codegen-mips64.cc", @@ -1704,8 +1983,51 @@ source_set("v8_base") { "src/regexp/mips64/regexp-macro-assembler-mips64.cc", "src/regexp/mips64/regexp-macro-assembler-mips64.h", ] - } else if (v8_target_arch == "s390" || v8_target_arch == "s390x") { - sources += [ + } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { + sources += [ ### gcmole(arch:ppc) ### + "src/builtins/ppc/builtins-ppc.cc", + "src/compiler/ppc/code-generator-ppc.cc", + "src/compiler/ppc/instruction-codes-ppc.h", + "src/compiler/ppc/instruction-scheduler-ppc.cc", + "src/compiler/ppc/instruction-selector-ppc.cc", + "src/crankshaft/ppc/lithium-codegen-ppc.cc", + "src/crankshaft/ppc/lithium-codegen-ppc.h", + "src/crankshaft/ppc/lithium-gap-resolver-ppc.cc", + "src/crankshaft/ppc/lithium-gap-resolver-ppc.h", + "src/crankshaft/ppc/lithium-ppc.cc", + "src/crankshaft/ppc/lithium-ppc.h", + "src/debug/ppc/debug-ppc.cc", + "src/full-codegen/ppc/full-codegen-ppc.cc", + "src/ic/ppc/access-compiler-ppc.cc", + "src/ic/ppc/handler-compiler-ppc.cc", + "src/ic/ppc/ic-compiler-ppc.cc", + "src/ic/ppc/ic-ppc.cc", + "src/ic/ppc/stub-cache-ppc.cc", + "src/ppc/assembler-ppc-inl.h", + "src/ppc/assembler-ppc.cc", + "src/ppc/assembler-ppc.h", + "src/ppc/code-stubs-ppc.cc", + "src/ppc/code-stubs-ppc.h", + "src/ppc/codegen-ppc.cc", + "src/ppc/codegen-ppc.h", + "src/ppc/constants-ppc.cc", + "src/ppc/constants-ppc.h", + "src/ppc/cpu-ppc.cc", + "src/ppc/deoptimizer-ppc.cc", + 
"src/ppc/disasm-ppc.cc", + "src/ppc/frames-ppc.cc", + "src/ppc/frames-ppc.h", + "src/ppc/interface-descriptors-ppc.cc", + "src/ppc/macro-assembler-ppc.cc", + "src/ppc/macro-assembler-ppc.h", + "src/ppc/simulator-ppc.cc", + "src/ppc/simulator-ppc.h", + "src/regexp/ppc/regexp-macro-assembler-ppc.cc", + "src/regexp/ppc/regexp-macro-assembler-ppc.h", + ] + } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { + sources += [ ### gcmole(arch:s390) ### + "src/builtins/s390/builtins-s390.cc", "src/compiler/s390/code-generator-s390.cc", "src/compiler/s390/instruction-codes-s390.h", "src/compiler/s390/instruction-scheduler-s390.cc", @@ -1728,7 +2050,6 @@ source_set("v8_base") { "src/s390/assembler-s390-inl.h", "src/s390/assembler-s390.cc", "src/s390/assembler-s390.h", - "src/s390/builtins-s390.cc", "src/s390/code-stubs-s390.cc", "src/s390/code-stubs-s390.h", "src/s390/codegen-s390.cc", @@ -1746,26 +2067,59 @@ source_set("v8_base") { "src/s390/simulator-s390.cc", "src/s390/simulator-s390.h", ] + } else if (v8_current_cpu == "x87") { + sources += [ ### gcmole(arch:x87) ### + "src/builtins/x87/builtins-x87.cc", + "src/compiler/x87/code-generator-x87.cc", + "src/compiler/x87/instruction-codes-x87.h", + "src/compiler/x87/instruction-scheduler-x87.cc", + "src/compiler/x87/instruction-selector-x87.cc", + "src/crankshaft/x87/lithium-codegen-x87.cc", + "src/crankshaft/x87/lithium-codegen-x87.h", + "src/crankshaft/x87/lithium-gap-resolver-x87.cc", + "src/crankshaft/x87/lithium-gap-resolver-x87.h", + "src/crankshaft/x87/lithium-x87.cc", + "src/crankshaft/x87/lithium-x87.h", + "src/debug/x87/debug-x87.cc", + "src/full-codegen/x87/full-codegen-x87.cc", + "src/ic/x87/access-compiler-x87.cc", + "src/ic/x87/handler-compiler-x87.cc", + "src/ic/x87/ic-compiler-x87.cc", + "src/ic/x87/ic-x87.cc", + "src/ic/x87/stub-cache-x87.cc", + "src/regexp/x87/regexp-macro-assembler-x87.cc", + "src/regexp/x87/regexp-macro-assembler-x87.h", + "src/x87/assembler-x87-inl.h", + 
"src/x87/assembler-x87.cc", + "src/x87/assembler-x87.h", + "src/x87/code-stubs-x87.cc", + "src/x87/code-stubs-x87.h", + "src/x87/codegen-x87.cc", + "src/x87/codegen-x87.h", + "src/x87/cpu-x87.cc", + "src/x87/deoptimizer-x87.cc", + "src/x87/disasm-x87.cc", + "src/x87/frames-x87.cc", + "src/x87/frames-x87.h", + "src/x87/interface-descriptors-x87.cc", + "src/x87/macro-assembler-x87.cc", + "src/x87/macro-assembler-x87.h", + "src/x87/simulator-x87.cc", + "src/x87/simulator-x87.h", + ] } - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":features", - ":toolchain", - ] - - if (!is_debug) { - configs -= [ "//build/config/compiler:default_optimization" ] - configs += [ "//build/config/compiler:optimize_max" ] - } + configs = [ ":internal_config" ] defines = [] deps = [ ":v8_libbase", + ":v8_libsampler", ] + sources += [ v8_generated_peephole_source ] + deps += [ ":run_mkpeephole" ] + if (is_win) { # TODO(jschuh): crbug.com/167187 fix size_t to int truncations. cflags = [ "/wd4267" ] @@ -1776,9 +2130,6 @@ source_set("v8_base") { if (is_win) { deps += [ "//third_party/icu:icudata" ] } - - # TODO(jochen): Add support for icu_use_data_file_flag - defines += [ "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE" ] } else { sources -= [ "src/i18n.cc", @@ -1790,15 +2141,18 @@ source_set("v8_base") { sources += [ "$target_gen_dir/debug-support.cc" ] deps += [ ":postmortem-metadata" ] } + + if (v8_enable_inspector_override) { + deps += [ "src/inspector:inspector" ] + } } -source_set("v8_libbase") { +v8_source_set("v8_libbase") { visibility = [ ":*" ] # Only targets in this file can depend on this. 
sources = [ - "src/base/accounting-allocator.cc", - "src/base/accounting-allocator.h", "src/base/adapters.h", + "src/base/atomic-utils.h", "src/base/atomicops.h", "src/base/atomicops_internals_arm64_gcc.h", "src/base/atomicops_internals_arm_gcc.h", @@ -1806,7 +2160,6 @@ source_set("v8_libbase") { "src/base/atomicops_internals_mac.h", "src/base/atomicops_internals_mips64_gcc.h", "src/base/atomicops_internals_mips_gcc.h", - "src/base/atomicops_internals_portable.h", "src/base/atomicops_internals_s390_gcc.h", "src/base/atomicops_internals_tsan.h", "src/base/atomicops_internals_x86_gcc.cc", @@ -1815,13 +2168,24 @@ source_set("v8_libbase") { "src/base/bits.cc", "src/base/bits.h", "src/base/build_config.h", + "src/base/compiler-specific.h", "src/base/cpu.cc", "src/base/cpu.h", + "src/base/debug/stack_trace.cc", + "src/base/debug/stack_trace.h", "src/base/division-by-constant.cc", "src/base/division-by-constant.h", + "src/base/file-utils.cc", + "src/base/file-utils.h", "src/base/flags.h", + "src/base/format-macros.h", + "src/base/free_deleter.h", "src/base/functional.cc", "src/base/functional.h", + "src/base/hashmap-entry.h", + "src/base/hashmap.h", + "src/base/ieee754.cc", + "src/base/ieee754.h", "src/base/iterator.h", "src/base/lazy-instance.h", "src/base/logging.cc", @@ -1843,25 +2207,13 @@ source_set("v8_libbase") { "src/base/safe_conversions_impl.h", "src/base/safe_math.h", "src/base/safe_math_impl.h", - "src/base/smart-pointers.h", "src/base/sys-info.cc", "src/base/sys-info.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config_base", - ":features", - ":toolchain", - ] - - if (!is_debug) { - configs -= [ "//build/config/compiler:default_optimization" ] - configs += [ "//build/config/compiler:optimize_max" ] - } + configs = [ ":internal_config_base" ] defines = [] @@ -1870,7 
+2222,10 @@ source_set("v8_libbase") { } if (is_linux) { - sources += [ "src/base/platform/platform-linux.cc" ] + sources += [ + "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-linux.cc", + ] libs = [ "dl", @@ -1883,18 +2238,31 @@ source_set("v8_libbase") { "rt", ] if (host_os == "mac") { - sources += [ "src/base/platform/platform-macos.cc" ] + sources += [ + "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-macos.cc", + ] } else { - sources += [ "src/base/platform/platform-linux.cc" ] + sources += [ + "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-linux.cc", + ] } } else { - sources += [ "src/base/platform/platform-linux.cc" ] + sources += [ + "src/base/debug/stack_trace_android.cc", + "src/base/platform/platform-linux.cc", + ] } } else if (is_mac) { - sources += [ "src/base/platform/platform-macos.cc" ] + sources += [ + "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-macos.cc", + ] } else if (is_win) { # TODO(jochen): Add support for cygwin. sources += [ + "src/base/debug/stack_trace_win.cc", "src/base/platform/platform-win32.cc", "src/base/win32-headers.h", ] @@ -1902,6 +2270,8 @@ source_set("v8_libbase") { defines += [ "_CRT_RAND_S" ] # for rand_s() libs = [ + "dbghelp.lib", + "shlwapi.lib", "winmm.lib", "ws2_32.lib", ] @@ -1910,36 +2280,51 @@ source_set("v8_libbase") { # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris. 
} -source_set("v8_libplatform") { +v8_source_set("v8_libplatform") { sources = [ + "//base/trace_event/common/trace_event_common.h", "include/libplatform/libplatform.h", + "include/libplatform/v8-tracing.h", "src/libplatform/default-platform.cc", "src/libplatform/default-platform.h", "src/libplatform/task-queue.cc", "src/libplatform/task-queue.h", + "src/libplatform/tracing/trace-buffer.cc", + "src/libplatform/tracing/trace-buffer.h", + "src/libplatform/tracing/trace-config.cc", + "src/libplatform/tracing/trace-object.cc", + "src/libplatform/tracing/trace-writer.cc", + "src/libplatform/tracing/trace-writer.h", + "src/libplatform/tracing/tracing-controller.cc", "src/libplatform/worker-thread.cc", "src/libplatform/worker-thread.h", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config_base", - ":features", - ":toolchain", + configs = [ ":internal_config_base" ] + + public_configs = [ ":libplatform_config" ] + + deps = [ + ":v8_libbase", ] +} - if (!is_debug) { - configs -= [ "//build/config/compiler:default_optimization" ] - configs += [ "//build/config/compiler:optimize_max" ] - } +v8_source_set("v8_libsampler") { + sources = [ + "src/libsampler/sampler.cc", + "src/libsampler/sampler.h", + ] + + configs = [ ":internal_config_base" ] + + public_configs = [ ":libsampler_config" ] deps = [ ":v8_libbase", ] } -source_set("fuzzer_support") { +v8_source_set("fuzzer_support") { visibility = [ ":*" ] # Only targets in this file can depend on this. 
sources = [ @@ -1947,136 +2332,339 @@ source_set("fuzzer_support") { "test/fuzzer/fuzzer-support.h", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config_base", - ":libplatform_config", - ":features", - ":toolchain", + configs = [ ":internal_config_base" ] + + deps = [ + ":v8", ] + public_deps = [ + ":v8_libplatform", + ] +} + +# Used by fuzzers that would require exposing too many symbols for a proper +# component build. +v8_source_set("fuzzer_support_nocomponent") { + visibility = [ ":*" ] # Only targets in this file can depend on this. + + sources = [ + "test/fuzzer/fuzzer-support.cc", + "test/fuzzer/fuzzer-support.h", + ] + + configs = [ ":internal_config_base" ] + deps = [ + ":v8_maybe_snapshot", + ] + + public_deps = [ ":v8_libplatform", - snapshot_target, ] } +v8_source_set("simple_fuzzer") { + sources = [ + "test/fuzzer/fuzzer.cc", + ] + + configs = [ ":internal_config_base" ] +} + ############################################################################### # Executables # -if (current_toolchain == snapshot_toolchain) { - executable("mksnapshot") { +if (current_toolchain == v8_snapshot_toolchain) { + v8_executable("mksnapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. sources = [ "src/snapshot/mksnapshot.cc", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":libplatform_config", - ":features", - ":toolchain", - ] + configs = [ ":internal_config" ] deps = [ ":v8_base", ":v8_libplatform", ":v8_nosnapshot", "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", ] } } +v8_executable("mkpeephole") { + # mkpeephole needs to be built for the build host so the peephole lookup + # table can built during build. The table depends on the properties of + # bytecodes that are described in bytecodes.{cc,h}. 
+ visibility = [ ":*" ] # Only targets in this file can depend on this. + + sources = [ + "src/interpreter/bytecode-operands.cc", + "src/interpreter/bytecode-operands.h", + "src/interpreter/bytecode-peephole-optimizer.h", + "src/interpreter/bytecode-traits.h", + "src/interpreter/bytecodes.cc", + "src/interpreter/bytecodes.h", + "src/interpreter/mkpeephole.cc", + ] + + configs = [ + ":external_config", + ":internal_config", + ] + + deps = [ + ":v8_libbase", + "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", + ] +} + ############################################################################### # Public targets # +want_v8_shell = + (current_toolchain == host_toolchain && v8_toolset_for_shell == "host") || + (current_toolchain == v8_snapshot_toolchain && + v8_toolset_for_shell == "host") || + (current_toolchain != host_toolchain && v8_toolset_for_shell == "target") + +group("gn_all") { + testonly = true + + deps = [ + ":d8", + ":v8_hello_world", + ":v8_parser_shell", + ":v8_sample_process", + ":v8_simple_json_fuzzer", + ":v8_simple_parser_fuzzer", + ":v8_simple_regexp_fuzzer", + ":v8_simple_wasm_asmjs_fuzzer", + ":v8_simple_wasm_fuzzer", + "test:gn_all", + "tools:gn_all", + ] + + if (want_v8_shell) { + deps += [ ":v8_shell" ] + } + + if (v8_test_isolation_mode != "noop") { + deps += [ ":d8_run" ] + } +} + if (is_component_build) { - component("v8") { + v8_component("v8") { sources = [ "src/v8dll-main.cc", ] + deps = [ + ":v8_dump_build_config", + ] + public_deps = [ ":v8_base", - snapshot_target, + ":v8_maybe_snapshot", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":features", - ":toolchain", - ] + configs = [ ":internal_config" ] public_configs = [ ":external_config" ] - - libs = [] - if (is_android && current_toolchain != host_toolchain) { - libs += [ "log" ] - } } } else { group("v8") { + deps = [ + ":v8_dump_build_config", + ] + 
public_deps = [ ":v8_base", - snapshot_target, + ":v8_maybe_snapshot", ] public_configs = [ ":external_config" ] } } -if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") || - (current_toolchain == snapshot_toolchain && v8_toolset_for_d8 == "host") || - (current_toolchain != host_toolchain && v8_toolset_for_d8 == "target")) { - executable("d8") { +v8_executable("d8") { + sources = [ + "$target_gen_dir/d8-js.cc", + "src/d8.cc", + "src/d8.h", + ] + + configs = [ + # Note: don't use :internal_config here because this target will get + # the :external_config applied to it by virtue of depending on :v8, and + # you can't have both applied to the same target. + ":internal_config_base", + ] + + deps = [ + ":d8_js2c", + ":v8", + ":v8_libplatform", + "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", + ] + + # TODO(jochen): Add support for vtunejit. + + if (is_posix) { + sources += [ "src/d8-posix.cc" ] + } else if (is_win) { + sources += [ "src/d8-windows.cc" ] + } + + if (v8_enable_i18n_support) { + deps += [ "//third_party/icu" ] + } +} + +v8_isolate_run("d8") { + deps = [ + ":d8", + ] + + isolate = "//src/d8.isolate" +} + +v8_executable("v8_hello_world") { + sources = [ + "samples/hello-world.cc", + ] + + configs = [ + # Note: don't use :internal_config here because this target will get + # the :external_config applied to it by virtue of depending on :v8, and + # you can't have both applied to the same target. + ":internal_config_base", + ] + + deps = [ + ":v8", + ":v8_libplatform", + "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", + ] + + if (v8_enable_i18n_support) { + deps += [ "//third_party/icu" ] + } +} + +v8_executable("v8_sample_process") { + sources = [ + "samples/process.cc", + ] + + configs = [ + # Note: don't use :internal_config here because this target will get + # the :external_config applied to it by virtue of depending on :v8, and + # you can't have both applied to the same target. 
+ ":internal_config_base", + ] + + deps = [ + ":v8", + ":v8_libplatform", + "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", + ] + + if (v8_enable_i18n_support) { + deps += [ "//third_party/icu" ] + } +} + +v8_executable("v8_parser_shell") { + sources = [ + "tools/parser-shell.cc", + "tools/shell-utils.h", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] + + deps = [ + ":v8_libplatform", + "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", + ] + + if (is_component_build) { + # v8_parser_shell can't be built against a shared library, so we + # need to depend on the underlying static target in that case. + deps += [ ":v8_maybe_snapshot" ] + } else { + deps += [ ":v8" ] + } + + if (v8_enable_i18n_support) { + deps += [ "//third_party/icu" ] + } + + if (is_win) { + # Suppress warnings about importing locally defined symbols. + if (is_component_build) { + ldflags = [ + "/ignore:4049", + "/ignore:4217", + ] + } + } +} + +if (want_v8_shell) { + v8_executable("v8_shell") { sources = [ - "src/d8.cc", - "src/d8.h", + "samples/shell.cc", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ + configs = [ # Note: don't use :internal_config here because this target will get # the :external_config applied to it by virtue of depending on :v8, and # you can't have both applied to the same target. ":internal_config_base", - ":features", - ":toolchain", ] deps = [ - ":d8_js2c", ":v8", ":v8_libplatform", "//build/config/sanitizers:deps", + "//build/win:default_exe_manifest", ] - # TODO(jochen): Add support for vtunejit. 
- - if (is_posix) { - sources += [ "src/d8-posix.cc" ] - } else if (is_win) { - sources += [ "src/d8-windows.cc" ] - } - - if (!is_component_build) { - sources += [ "$target_gen_dir/d8-js.cc" ] - } if (v8_enable_i18n_support) { deps += [ "//third_party/icu" ] } } } -source_set("json_fuzzer") { +template("v8_fuzzer") { + name = target_name + forward_variables_from(invoker, "*") + v8_executable("v8_simple_" + name) { + deps = [ + ":" + name, + ":simple_fuzzer", + "//build/win:default_exe_manifest", + ] + + configs = [ ":external_config" ] + } +} + +v8_source_set("json_fuzzer") { sources = [ "test/fuzzer/json.cc", ] @@ -2085,36 +2673,34 @@ source_set("json_fuzzer") { ":fuzzer_support", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":libplatform_config", - ":features", - ":toolchain", + configs = [ + ":external_config", + ":internal_config_base", ] } -source_set("parser_fuzzer") { +v8_fuzzer("json_fuzzer") { +} + +v8_source_set("parser_fuzzer") { sources = [ "test/fuzzer/parser.cc", ] deps = [ - ":fuzzer_support", + ":fuzzer_support_nocomponent", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":libplatform_config", - ":features", - ":toolchain", + configs = [ + ":external_config", + ":internal_config_base", ] } -source_set("regexp_fuzzer") { +v8_fuzzer("parser_fuzzer") { +} + +v8_source_set("regexp_fuzzer") { sources = [ "test/fuzzer/regexp.cc", ] @@ -2123,50 +2709,232 @@ source_set("regexp_fuzzer") { ":fuzzer_support", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":libplatform_config", - ":features", - ":toolchain", + configs = [ + ":external_config", + ":internal_config_base", ] } -source_set("wasm_fuzzer") { +v8_fuzzer("regexp_fuzzer") 
{ +} + +v8_source_set("wasm_module_runner") { + sources = [ + "test/common/wasm/wasm-module-runner.cc", + "test/common/wasm/wasm-module-runner.h", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_source_set("wasm_fuzzer") { sources = [ "test/fuzzer/wasm.cc", ] deps = [ ":fuzzer_support", + ":wasm_module_runner", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":libplatform_config", - ":features", - ":toolchain", + configs = [ + ":external_config", + ":internal_config_base", ] } -source_set("wasm_asmjs_fuzzer") { +v8_fuzzer("wasm_fuzzer") { +} + +v8_source_set("wasm_asmjs_fuzzer") { sources = [ "test/fuzzer/wasm-asmjs.cc", ] deps = [ ":fuzzer_support", + ":wasm_module_runner", ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - configs += [ - ":internal_config", - ":libplatform_config", - ":features", - ":toolchain", + configs = [ + ":external_config", + ":internal_config_base", ] } + +v8_fuzzer("wasm_asmjs_fuzzer") { +} + +v8_source_set("wasm_code_fuzzer") { + sources = [ + "test/fuzzer/wasm-code.cc", + ] + + deps = [ + ":fuzzer_support", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_code_fuzzer") { +} + +v8_source_set("lib_wasm_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-section-fuzzers.cc", + "test/fuzzer/wasm-section-fuzzers.h", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_source_set("wasm_types_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-types-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_types_section_fuzzer") { +} + +v8_source_set("wasm_names_section_fuzzer") { + sources = 
[ + "test/fuzzer/wasm-names-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_names_section_fuzzer") { +} + +v8_source_set("wasm_globals_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-globals-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_globals_section_fuzzer") { +} + +v8_source_set("wasm_imports_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-imports-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_imports_section_fuzzer") { +} + +v8_source_set("wasm_function_sigs_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-function-sigs-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_function_sigs_section_fuzzer") { +} + +v8_source_set("wasm_memory_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-memory-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_memory_section_fuzzer") { +} + +v8_source_set("wasm_data_section_fuzzer") { + sources = [ + "test/fuzzer/wasm-data-section.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_section_fuzzer", + ":wasm_module_runner", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] +} + +v8_fuzzer("wasm_data_section_fuzzer") { +} diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index dc97b8042244cc..0a202dafebb0b1 100644 --- a/deps/v8/ChangeLog 
+++ b/deps/v8/ChangeLog @@ -1,2144 +1,7 @@ -2016-04-06: Version 5.1.281 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.280 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.279 - - Ship --harmony-regexp-exec (issue 4602). - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.278 - - [V8] Removed debugger V8::PromiseEvent (Chromium issue 526811). - - [asm.js] Fix typing bug for non-literals in heap access (Chromium issue - 599825). - - Ensure CreateDataProperty works correctly on TypedArrays (Chromium issue - 596394). - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.277 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.276 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.275 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.274 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.273 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.272 - - Performance and stability improvements on all platforms. - - -2016-04-05: Version 5.1.271 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.270 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.269 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.268 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.267 - - [api] Restrict Template::Set to take templates or primitive values. - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.266 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.265 - - Performance and stability improvements on all platforms. 
- - -2016-04-04: Version 5.1.264 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.263 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.262 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.261 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.260 - - Performance and stability improvements on all platforms. - - -2016-04-04: Version 5.1.259 - - Further ES2015 RegExp spec compliance fixes (issue 4602). - - Performance and stability improvements on all platforms. - - -2016-04-03: Version 5.1.258 - - Performance and stability improvements on all platforms. - - -2016-04-02: Version 5.1.257 - - Performance and stability improvements on all platforms. - - -2016-04-02: Version 5.1.256 - - Performance and stability improvements on all platforms. - - -2016-04-02: Version 5.1.255 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.254 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.253 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.252 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.251 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.250 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.249 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.248 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.247 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.246 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.245 - - Performance and stability improvements on all platforms. 
- - -2016-04-01: Version 5.1.244 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.243 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.242 - - Performance and stability improvements on all platforms. - - -2016-04-01: Version 5.1.241 - - [GN] Define USE_EABI_HARDFLOAT=1 when arm_float_abi=="hard" (Chromium - issue 592660). - - Ship --harmony-regexp-exec (issue 4602). - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.240 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.239 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.238 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.237 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.236 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.235 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.234 - - [arm/Linux] Don't rely on KUSER_HELPERS feature (Chromium issue 599051). - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.233 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.232 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.231 - - Turn scavenge_reclaim_unmodified_objects on by default (Chromium issue - 4880). - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.230 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.229 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.228 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.227 - - Performance and stability improvements on all platforms. 
- - -2016-03-31: Version 5.1.226 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.225 - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.224 - - Raise minimum Mac OS version to 10.7 (issue 4847). - - Performance and stability improvements on all platforms. - - -2016-03-31: Version 5.1.223 - - Performance and stability improvements on all platforms. - - -2016-03-30: Version 5.1.222 - - Performance and stability improvements on all platforms. - - -2016-03-30: Version 5.1.221 - - Performance and stability improvements on all platforms. - - -2016-03-30: Version 5.1.220 - - Stage --harmony-regexp-exec (issue 4602). - - Add fast paths for native RegExps in ES2015 subclass-aware code (issue - 4602). - - [V8] Add FunctionMirror.prototype.contextDebugId method (Chromium issue - 595206). - - Performance and stability improvements on all platforms. - - -2016-03-30: Version 5.1.219 - - Remove RegExp.prototype.source getter compat workaround (issue 4827, - Chromium issue 581577). - - Check for proper types from error handling code (Chromium issue 596718). - - Add ES2015 RegExp full subclassing semantics behind a flag (issue 4602). - - Performance and stability improvements on all platforms. - - -2016-03-24: Version 5.1.218 - - Performance and stability improvements on all platforms. - - -2016-03-24: Version 5.1.217 - - [esnext] implement String padding proposal. - - Performance and stability improvements on all platforms. - - -2016-03-24: Version 5.1.216 - - Performance and stability improvements on all platforms. - - -2016-03-24: Version 5.1.215 - - Performance and stability improvements on all platforms. - - -2016-03-24: Version 5.1.214 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.213 - - Implement ES2015 labelled function declaration restrictions (Chromium - issue 595309). - - Performance and stability improvements on all platforms. 
- - -2016-03-23: Version 5.1.212 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.211 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.210 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.209 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.208 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.207 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.206 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.205 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.204 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.203 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.202 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.201 - - Performance and stability improvements on all platforms. - - -2016-03-23: Version 5.1.200 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.199 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.198 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.197 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.196 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.195 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.194 - - Fix match default behavior on strings for ES2015 semantics (issue 4602). - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.193 - - Performance and stability improvements on all platforms. 
- - -2016-03-22: Version 5.1.192 - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.191 - - [Interpreter] TurboFan implementation of intrinsics (issue 4822). - - Performance and stability improvements on all platforms. - - -2016-03-22: Version 5.1.190 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.189 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.188 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.187 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.186 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.185 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.184 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.183 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.182 - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.181 - - Temporarily undeprecate ForceSet (Chromium issue 595601). - - Performance and stability improvements on all platforms. - - -2016-03-21: Version 5.1.180 - - Performance and stability improvements on all platforms. - - -2016-03-20: Version 5.1.179 - - Performance and stability improvements on all platforms. - - -2016-03-20: Version 5.1.178 - - Performance and stability improvements on all platforms. - - -2016-03-20: Version 5.1.177 - - Performance and stability improvements on all platforms. - - -2016-03-19: Version 5.1.176 - - Performance and stability improvements on all platforms. - - -2016-03-18: Version 5.1.175 - - Performance and stability improvements on all platforms. - - -2016-03-18: Version 5.1.174 - - Performance and stability improvements on all platforms. 
- - -2016-03-18: Version 5.1.173 - - Parser: Make skipping HTML comments optional (Chromium issue 573887). - - [es7] implement exponentiation operator proposal (issue 3915). - - Performance and stability improvements on all platforms. - - -2016-03-18: Version 5.1.172 - - Performance and stability improvements on all platforms. - - -2016-03-18: Version 5.1.171 - - Performance and stability improvements on all platforms. - - -2016-03-18: Version 5.1.170 - - Performance and stability improvements on all platforms. - - -2016-03-18: Version 5.1.169 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.168 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.167 - - Throw the right exceptions from setting elements in - Array.prototype.concat (Chromium issue 595319). - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.166 - - Throw exceptions from CreateDataProperty when should_throw (Chromium - issue 595319). - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.165 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.164 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.163 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.162 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.161 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.160 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.159 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.158 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.157 - - Performance and stability improvements on all platforms. 
- - -2016-03-17: Version 5.1.156 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.155 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.154 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.153 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.152 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.151 - - Move FastAccessorAssembler from RawMachineAssembler to CodeStubAssembler - (Chromium issue 508898). - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.150 - - [serializer] Add API to warm up startup snapshot with an additional - script (issue 4836). - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.149 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.148 - - Performance and stability improvements on all platforms. - - -2016-03-17: Version 5.1.147 - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.146 - - Ship ES2015 restrictions on function declaration locations (issue 4824). - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.145 - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.144 - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.143 - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.142 - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.141 - - Performance and stability improvements on all platforms. - - -2016-03-16: Version 5.1.140 - - Put RegExp js code in strict mode (issue 4504). - - Performance and stability improvements on all platforms. 
- - -2016-03-15: Version 5.1.139 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.138 - - [builtins] Fix Array.prototype.concat bug (Chromium issue 594574). - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.137 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.136 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.135 - - Ship Array.prototype.values (issue 4247). - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.134 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.133 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.132 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.131 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.130 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.129 - - Performance and stability improvements on all platforms. - - -2016-03-15: Version 5.1.128 - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.127 - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.126 - - Remove --harmony-modules flag and let embedder decide when modules are - used (issue 1569, Chromium issue 594639). - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.125 - - Make test262 test runner check for which exception is thrown (issue - 4803). - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.124 - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.123 - - Performance and stability improvements on all platforms. 
- - -2016-03-14: Version 5.1.122 - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.121 - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.120 - - Performance and stability improvements on all platforms. - - -2016-03-14: Version 5.1.119 - - Performance and stability improvements on all platforms. - - -2016-03-13: Version 5.1.118 - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.117 - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.116 - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.115 - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.114 - - [arm64] Fix i/d cache line size confusion typo (Chromium issue 593867). - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.113 - - Fix expression positions for for-loops (issue 4690). - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.112 - - Performance and stability improvements on all platforms. - - -2016-03-11: Version 5.1.111 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.110 - - Minor library function fixes for TypedArray spec compliance (issue - 4785). - - Check that Promise subclasses have callable resolve/reject (issue 4633). - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.109 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.108 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.107 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.106 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.105 - - Performance and stability improvements on all platforms. 
- - -2016-03-10: Version 5.1.104 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.103 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.102 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.101 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.100 - - [strong] Remove all remainders of strong mode (issue 3956). - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.99 - - Marks the label associated with the runtime call in - CodeStubAssembler::Allocate as deferred (Chromium issue 593359). - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.98 - - Implement iterator finalization in array destructuring (issue 3566). - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.97 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.96 - - Performance and stability improvements on all platforms. - - -2016-03-10: Version 5.1.95 - - String.prototype[Symbol.iterator] does RequireObjectCoercible(this) - (issue 4348). - - Stage restrictive declarations flag (issue 4824). - - Expose Array.prototype.values behind a flag and stage it (issue 4247). - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.94 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.93 - - Ensure appropriate bounds checking for Array subclass concat (Chromium - issue 592340). - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.92 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.91 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.90 - - Performance and stability improvements on all platforms. 
- - -2016-03-09: Version 5.1.89 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.88 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.87 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.86 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.85 - - Performance and stability improvements on all platforms. - - -2016-03-09: Version 5.1.84 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.83 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.82 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.81 - - Optimize new TypedArray(typedArray) constructor (Chromium issue 592007). - - Ensure the @@species protector is updated for accessors (issue 4093). - - Add UseCounters for various RegExp compatibility issues (Chromium issue - 581577). - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.80 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.79 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.78 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.77 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.76 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.75 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.74 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.73 - - Performance and stability improvements on all platforms. - - -2016-03-08: Version 5.1.72 - - Performance and stability improvements on all platforms. 
- - -2016-03-08: Version 5.1.71 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.70 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.69 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.68 - - [key-accumulator] Starting to reimplement the key-accumulator (issue - 4758, Chromium issue 545503). - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.67 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.66 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.65 - - [key-accumulator] Starting to reimplement the key-accumulator (issue - 4758, Chromium issue 545503). - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.64 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.63 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.62 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.61 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.60 - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.59 - - Use v8::kGCCallbackFlagCollectAllAvailableGarbage in - Heap::CollectAllAvailableGarbage (Chromium issue 591463). - - [key-accumulator] Starting to reimplement the key-accumulator (issue - 4758, Chromium issue 545503). - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.58 - - [regexp] Fix off-by-one in CharacterRange::Negate (Chromium issue - 592343). - - Performance and stability improvements on all platforms. - - -2016-03-07: Version 5.1.57 - - Performance and stability improvements on all platforms. 
- - -2016-03-07: Version 5.1.56 - - Use v8::kGCCallbackFlagCollectAllAvailableGarbage in - Heap::CollectAllAvailableGarbage (Chromium issue 591463). - - Performance and stability improvements on all platforms. - - -2016-03-06: Version 5.1.55 - - Performance and stability improvements on all platforms. - - -2016-03-06: Version 5.1.54 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.53 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.52 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.51 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.50 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.49 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.48 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.47 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.46 - - Introduce v8::MicrotasksScope (Chromium issue 585949). - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.45 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.44 - - Use a different GCCallbackFlag for GCs triggered by - CollectAllAvailableGarbage (Chromium issue 591463). - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.43 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.42 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.41 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.40 - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.39 - - Ship ES2015 Function.name reform (issue 3699, Chromium issue 588803). 
- - Introduce v8::MicrotasksScope (Chromium issue 585949). - - Performance and stability improvements on all platforms. - - -2016-03-04: Version 5.1.38 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.37 - - Restrict FunctionDeclarations in Statement position (issue 4647). - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.36 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.35 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.34 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.33 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.32 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.31 - - Performance and stability improvements on all platforms. - - -2016-03-03: Version 5.1.30 - - Implement TypedArray(typedarray) constructor (issue 4726). - - Performance and stability improvements on all platforms. - - -2016-03-02: Version 5.1.29 - - Performance and stability improvements on all platforms. - - -2016-03-02: Version 5.1.28 - - [turbofan] Adds an Allocate macro to the CodeStubAssembler (Chromium - issue 588692). - - Performance and stability improvements on all platforms. - - -2016-03-02: Version 5.1.27 - - Performance and stability improvements on all platforms. - - -2016-03-02: Version 5.1.26 - - Performance and stability improvements on all platforms. - - -2016-03-02: Version 5.1.25 - - Performance and stability improvements on all platforms. - - -2016-03-02: Version 5.1.24 - - Devtools: expose scopes source location to debugger (Chromium issue - 327092). - - CodeStubAssembler can generate code for builtins (issue 4614). - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.23 - - Performance and stability improvements on all platforms. 
- - -2016-03-01: Version 5.1.22 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.21 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.20 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.19 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.18 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.17 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.16 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.15 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.14 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.13 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.12 - - Performance and stability improvements on all platforms. - - -2016-03-01: Version 5.1.11 - - Make %TypedArray%.from spec-compliant (issue 4782). - - Performance and stability improvements on all platforms. - - -2016-02-29: Version 5.1.10 - - Performance and stability improvements on all platforms. - - -2016-02-29: Version 5.1.9 - - Performance and stability improvements on all platforms. - - -2016-02-28: Version 5.1.8 - - Performance and stability improvements on all platforms. - - -2016-02-28: Version 5.1.7 - - Performance and stability improvements on all platforms. - - -2016-02-28: Version 5.1.6 - - Performance and stability improvements on all platforms. - - -2016-02-28: Version 5.1.5 - - Performance and stability improvements on all platforms. - - -2016-02-28: Version 5.1.4 - - Performance and stability improvements on all platforms. - - -2016-02-28: Version 5.1.3 - - Performance and stability improvements on all platforms. 
- - -2016-02-28: Version 5.1.2 - - Performance and stability improvements on all platforms. - - -2016-02-27: Version 5.1.1 - - Fix strict mode function error message (issue 2198). - - Reland of Make Intl install properties more like how other builtins do - (patchset #1 id:1 of https://codereview.chromium.org/1733293003/ ) - (issue 4778). - - [turbofan] Bailout if LoadBuffer typing assumption doesn't hold - (Chromium issue 589792). - - Performance and stability improvements on all platforms. - - -2016-02-26: Version 5.0.104 - - Performance and stability improvements on all platforms. - - -2016-02-26: Version 5.0.103 - - Make Intl install properties more like how other builtins do (issue - 4778). - - Performance and stability improvements on all platforms. - - -2016-02-26: Version 5.0.102 - - Make TypedArray.from and TypedArray.of writable and configurable (issue - 4315). - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.101 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.100 - - Ship ES2015 iterator finalization (issue 3566). - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.99 - - Introduce MicrotasksCompletedCallback (Chromium issue 585949). - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.98 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.97 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.96 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.95 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.94 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.93 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.92 - - Performance and stability improvements on all platforms. 
- - -2016-02-25: Version 5.0.91 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.90 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.89 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.88 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.87 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.86 - - Performance and stability improvements on all platforms. - - -2016-02-25: Version 5.0.85 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.84 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.83 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.82 - - Ensure IteratorClose is called for errors in non-declaring assignments - (issue 4776). - - Fix priority of exceptions being thrown from for-of loops (issue 4775). - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.81 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.80 - - Encode interpreter::SourcePositionTable as variable-length ints (issue - 4690). - - Stage ES2015 iterator finalization (issue 3566). - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.79 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.78 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.77 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.76 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.75 - - Performance and stability improvements on all platforms. - - -2016-02-24: Version 5.0.74 - - Performance and stability improvements on all platforms. 
- - -2016-02-23: Version 5.0.73 - - Intl: Use private symbols to memoize bound functions (issue 3785). - - Ensure Array.prototype.indexOf returns +0 rather than -0. - - Ship ES2015 Symbol.species (issue 4093). - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.72 - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.71 - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.70 - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.69 - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.68 - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.67 - - Performance and stability improvements on all platforms. - - -2016-02-23: Version 5.0.66 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.65 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.64 - - ES2015 web compat workaround: RegExp.prototype.flags => "" (Chromium - issue 581577). - - Remove the Proxy enumerate trap (issue 4768). - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.63 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.62 - - Remove Reflect.enumerate (issue 4768). - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.61 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.60 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.59 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.58 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.57 - - Performance and stability improvements on all platforms. 
- - -2016-02-22: Version 5.0.56 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.55 - - Performance and stability improvements on all platforms. - - -2016-02-22: Version 5.0.54 - - Performance and stability improvements on all platforms. - - -2016-02-21: Version 5.0.53 - - Performance and stability improvements on all platforms. - - -2016-02-21: Version 5.0.52 - - Performance and stability improvements on all platforms. - - -2016-02-21: Version 5.0.51 - - Performance and stability improvements on all platforms. - - -2016-02-21: Version 5.0.50 - - Performance and stability improvements on all platforms. - - -2016-02-21: Version 5.0.49 - - Performance and stability improvements on all platforms. - - -2016-02-21: Version 5.0.48 - - Performance and stability improvements on all platforms. - - -2016-02-20: Version 5.0.47 - - Performance and stability improvements on all platforms. - - -2016-02-20: Version 5.0.46 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.45 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.44 - - Return undefined from RegExp.prototype.compile (Chromium issue 585775). - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.43 - - Disable --harmony-object-observe (Chromium issue 552100). - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.42 - - Introduce BeforeCallEnteredCallback (Chromium issue 585949). - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.41 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.40 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.39 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.38 - - [wasm] Add support for import section (Chromium issue 575167). 
- - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.37 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.36 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.35 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.34 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.33 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.32 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.31 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.30 - - Mark old SetAccessCheckCallback as deprecated. - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.29 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.28 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.27 - - Performance and stability improvements on all platforms. - - -2016-02-19: Version 5.0.26 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.25 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.24 - - Make Date.prototype.toGMTString an alias for Date.prototype.toUTCString - (issue 4708). - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.23 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.22 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.21 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.20 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.19 - - Performance and stability improvements on all platforms. 
- - -2016-02-18: Version 5.0.18 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.17 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.16 - - [es6] Implement for-of iterator finalization (issue 2214). - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.15 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.14 - - Use displayName in Error.stack rendering if present (issue 4761). - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.13 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.12 - - Performance and stability improvements on all platforms. - - -2016-02-18: Version 5.0.11 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.10 - - [Atomics] Add dmb/dsb/isb instructions to ARM (issue 4614). - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.9 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.8 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.7 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.6 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.5 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.4 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.3 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.2 - - Performance and stability improvements on all platforms. - - -2016-02-17: Version 5.0.1 - - Performance and stability improvements on all platforms. +2016-02-17: Sentinel + The ChangeLog file is no longer maintained on master. This + sentinel should stay on top of this list. 
2016-02-17: Version 4.10.253 @@ -7268,12 +5131,6 @@ Performance and stability improvements on all platforms. -2015-05-17: Sentinel - - The ChangeLog file is no longer maintained on bleeding_edge. This - sentinel should stay on top of this list. - - 2015-05-17: Version 4.5.2 Performance and stability improvements on all platforms. diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 0559523283f74e..3a97e142d5393e 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -3,46 +3,60 @@ # all paths in here must match this assumption. vars = { - "git_url": "https://chromium.googlesource.com", + "chromium_url": "https://chromium.googlesource.com", } deps = { - "v8/build/gyp": - Var("git_url") + "/external/gyp.git" + "@" + "4ec6c4e3a94bd04a6da2858163d40b2429b8aad1", + "v8/build": + Var("chromium_url") + "/chromium/src/build.git" + "@" + "43f8b7b4f8fa2290a3ce9f82544514a8d961e2c3", + "v8/tools/gyp": + Var("chromium_url") + "/external/gyp.git" + "@" + "702ac58e477214c635d9b541932e75a95d349352", "v8/third_party/icu": - Var("git_url") + "/chromium/deps/icu.git" + "@" + "c291cde264469b20ca969ce8832088acb21e0c48", + Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "b0bd3ee50bc2e768d7a17cbc60d87f517f024dbe", + "v8/third_party/instrumented_libraries": + Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a", "v8/buildtools": - Var("git_url") + "/chromium/buildtools.git" + "@" + "80b5126f91be4eb359248d28696746ef09d5be67", + Var("chromium_url") + "/chromium/buildtools.git" + "@" + "86f7e41d9424b9d8faf66c601b129855217f9a08", "v8/base/trace_event/common": - Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "c8c8665c2deaf1cc749d9f8e153256d4f67bf1b8", + Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "6232c13e4edb36c84c61653fdae5a4afb5af9745", + "v8/third_party/WebKit/Source/platform/inspector_protocol": + Var("chromium_url") + 
"/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "e240fdcdb5880deb48156dbb9ccee0c28664cf88", + "v8/third_party/jinja2": + Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "b61a2c009a579593a259c1b300e0ad02bf48fd78", + "v8/third_party/markupsafe": + Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961", "v8/tools/swarming_client": - Var('git_url') + '/external/swarming.client.git' + '@' + "df6e95e7669883c8fe9ef956c69a544154701a49", + Var('chromium_url') + '/external/swarming.client.git' + '@' + "380e32662312eb107f06fcba6409b0409f8fef72", "v8/testing/gtest": - Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87", + Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87", "v8/testing/gmock": - Var("git_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be", + Var("chromium_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be", "v8/test/benchmarks/data": - Var("git_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f", + Var("chromium_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f", "v8/test/mozilla/data": - Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be", - "v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd", + Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be", + "v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556", "v8/test/test262/data": - Var("git_url") + 
"/external/github.com/tc39/test262.git" + "@" + "57d3e2216fa86ad63b6c0a54914ba9dcbff96003", + Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "29c23844494a7cc2fbebc6948d2cb0bcaddb24e7", + "v8/test/test262/harness": + Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8", "v8/tools/clang": - Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "faee82e064e04e5cbf60cc7327e7a81d2a4557ad", + Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "204eee93d4ffbc01d6182e4c199960db763c7dbc", } deps_os = { "android": { "v8/third_party/android_tools": - Var("git_url") + "/android_tools.git" + "@" + "adfd31794011488cd0fc716b53558b2d8a67af8b", + Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf", }, "win": { "v8/third_party/cygwin": - Var("git_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df", + Var("chromium_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df", } } +recursedeps = [ 'v8/third_party/android_tools' ] + include_rules = [ # Everybody can use some things. "+include", @@ -53,6 +67,7 @@ include_rules = [ # checkdeps.py shouldn't check for includes in these directories: skip_child_includes = [ "build", + "gypfiles", "third_party", ] @@ -65,7 +80,7 @@ hooks = [ 'pattern': '.', 'action': [ 'python', - 'v8/build/landmines.py', + 'v8/gypfiles/landmines.py', ], }, # Pull clang-format binaries using checked-in hashes. 
@@ -186,11 +201,66 @@ hooks = [ "-s", "v8/buildtools/linux64/gn.sha1", ], }, + { + "name": "wasm_fuzzer", + "pattern": ".", + "action": [ "download_from_google_storage", + "--no_resume", + "--no_auth", + "-u", + "--bucket", "v8-wasm-fuzzer", + "-s", "v8/test/fuzzer/wasm.tar.gz.sha1", + ], + }, + { + "name": "wasm_asmjs_fuzzer", + "pattern": ".", + "action": [ "download_from_google_storage", + "--no_resume", + "--no_auth", + "-u", + "--bucket", "v8-wasm-asmjs-fuzzer", + "-s", "v8/test/fuzzer/wasm_asmjs.tar.gz.sha1", + ], + }, + { + "name": "closure_compiler", + "pattern": ".", + "action": [ "download_from_google_storage", + "--no_resume", + "--no_auth", + "-u", + "--bucket", "chromium-v8-closure-compiler", + "-s", "v8/src/inspector/build/closure-compiler.tar.gz.sha1", + ], + }, + { + # Downloads the current stable linux sysroot to build/linux/ if needed. + # This sysroot updates at about the same rate that the chrome build deps + # change. + 'name': 'sysroot', + 'pattern': '.', + 'action': [ + 'python', + 'v8/build/linux/sysroot_scripts/install-sysroot.py', + '--running-as-hook', + ], + }, + { + # Pull sanitizer-instrumented third-party libraries if requested via + # GYP_DEFINES. + 'name': 'instrumented_libraries', + 'pattern': '\\.sha1', + 'action': [ + 'python', + 'v8/third_party/instrumented_libraries/scripts/download_binaries.py', + ], + }, { # Update the Windows toolchain if necessary. 'name': 'win_toolchain', 'pattern': '.', - 'action': ['python', 'v8/build/vs_toolchain.py', 'update'], + 'action': ['python', 'v8/gypfiles/vs_toolchain.py', 'update'], }, # Pull binutils for linux, enabled debug fission for faster linking / # debugging when used with clang on Ubuntu Precise. @@ -208,7 +278,7 @@ hooks = [ # Note: This must run before the clang update. 
'name': 'gold_plugin', 'pattern': '.', - 'action': ['python', 'v8/build/download_gold_plugin.py'], + 'action': ['python', 'v8/gypfiles/download_gold_plugin.py'], }, { # Pull clang if needed or requested via GYP_DEFINES. @@ -220,6 +290,6 @@ hooks = [ { # A change to a .gyp, .gypi, or to GYP itself should run the generator. "pattern": ".", - "action": ["python", "v8/build/gyp_v8"], + "action": ["python", "v8/gypfiles/gyp_v8", "--running-as-hook"], }, ] diff --git a/deps/v8/src/third_party/fdlibm/LICENSE b/deps/v8/LICENSE.fdlibm similarity index 100% rename from deps/v8/src/third_party/fdlibm/LICENSE rename to deps/v8/LICENSE.fdlibm diff --git a/deps/v8/Makefile b/deps/v8/Makefile index a0c08a6d9634c7..a6d4d135da3da1 100644 --- a/deps/v8/Makefile +++ b/deps/v8/Makefile @@ -33,7 +33,6 @@ GYPFLAGS ?= TESTFLAGS ?= ANDROID_NDK_HOST_ARCH ?= ANDROID_V8 ?= /data/local/tmp/v8 -NACL_SDK_ROOT ?= # Special build flags. Use them like this: "make library=shared" @@ -122,10 +121,6 @@ endif ifeq ($(werror), no) GYPFLAGS += -Dwerror='' endif -# presubmit=no -ifeq ($(presubmit), no) - TESTFLAGS += --no-presubmit -endif # strictaliasing=off (workaround for GCC-4.5) ifeq ($(strictaliasing), off) GYPFLAGS += -Dv8_no_strict_aliasing=1 @@ -227,6 +222,11 @@ ifeq ($(no_omit_framepointer), on) GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer endif +ifdef android_ndk_root + GYPFLAGS += -Dandroid_ndk_root=$(android_ndk_root) + export ANDROID_NDK_ROOT = $(android_ndk_root) +endif + # ----------------- available targets: -------------------- # - "grokdump": rebuilds heap constants lists used by grokdump # - any arch listed in ARCHES (see below) @@ -235,7 +235,6 @@ endif # - "native": current host's architecture, release mode # - any of the above with .check appended, e.g. 
"ia32.release.check" # - "android": cross-compile for Android/ARM -# - "nacl" : cross-compile for Native Client (ia32 and x64) # - default (no target specified): build all DEFAULT_ARCHES and MODES # - "check": build all targets and run all tests # - ".clean" for any in ARCHES @@ -245,21 +244,22 @@ endif # Architectures and modes to be compiled. Consider these to be internal # variables, don't override them (use the targets instead). -ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 \ - s390 s390x +ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \ + s390x +ARCHES32 = ia32 arm mips mipsel x87 ppc s390 DEFAULT_ARCHES = ia32 x64 arm MODES = release debug optdebug DEFAULT_MODES = release debug ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \ android_mipsel android_x87 -NACL_ARCHES = nacl_ia32 nacl_x64 # List of files that trigger Makefile regeneration: GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \ - build/shim_headers.gypi build/features.gypi build/standalone.gypi \ - build/toolchain.gypi build/all.gyp build/mac/asan.gyp \ + gypfiles/shim_headers.gypi gypfiles/features.gypi \ + gypfiles/standalone.gypi \ + gypfiles/toolchain.gypi gypfiles/all.gyp gypfiles/mac/asan.gyp \ test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \ - test/unittests/unittests.gyp tools/gyp/v8.gyp \ + test/unittests/unittests.gyp src/v8.gyp \ tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \ buildtools/third_party/libc++abi/libc++abi.gyp \ buildtools/third_party/libc++/libc++.gyp samples/samples.gyp \ @@ -273,13 +273,10 @@ endif BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES))) ANDROID_BUILDS = $(foreach mode,$(MODES), \ $(addsuffix .$(mode),$(ANDROID_ARCHES))) -NACL_BUILDS = $(foreach mode,$(MODES), \ - $(addsuffix .$(mode),$(NACL_ARCHES))) # Generates corresponding test targets, e.g. "ia32.release.check". 
CHECKS = $(addsuffix .check,$(BUILDS)) QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS)) ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS)) -NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS)) # File where previously used GYPFLAGS are stored. ENVFILE = $(OUTDIR)/environment @@ -288,9 +285,7 @@ ENVFILE = $(OUTDIR)/environment $(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \ $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \ $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \ - $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \ - $(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \ - must-set-NACL_SDK_ROOT + $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) # Target definitions. "all" is the default. all: $(DEFAULT_MODES) @@ -329,16 +324,6 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android OUTDIR="$(OUTDIR)" \ GYPFLAGS="$(GYPFLAGS)" -$(NACL_ARCHES): $(addprefix $$@.,$(MODES)) - -$(NACL_BUILDS): $(GYPFILES) $(ENVFILE) \ - Makefile.nacl must-set-NACL_SDK_ROOT - @$(MAKE) -f Makefile.nacl $@ \ - ARCH="$(basename $@)" \ - MODE="$(subst .,,$(suffix $@))" \ - OUTDIR="$(OUTDIR)" \ - GYPFLAGS="$(GYPFLAGS)" - # Test targets. check: all @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ @@ -382,15 +367,6 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync $(addsuffix .check, $(ANDROID_ARCHES)): \ $(addprefix $$(basename $$@).,$(MODES)).check -$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@) - @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --arch-and-mode=$(basename $@) \ - --timeout=600 --nopresubmit --noi18n \ - --command-prefix="tools/nacl-run.py" - -$(addsuffix .check, $(NACL_ARCHES)): \ - $(addprefix $$(basename $$@).,$(MODES)).check - native.check: native @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \ --arch-and-mode=. $(TESTFLAGS) @@ -420,7 +396,7 @@ turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES)) tc: turbocheck # Clean targets. 
You can clean each architecture individually, or everything. -$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)): +$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)): rm -f $(OUTDIR)/Makefile.$(basename $@)* rm -rf $(OUTDIR)/$(basename $@).release rm -rf $(OUTDIR)/$(basename $@).debug @@ -432,7 +408,7 @@ native.clean: rm -rf $(OUTDIR)/native find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete -clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean gtags.clean +clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean gtags.clean tags.clean # GYP file generation targets. OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS)) @@ -441,34 +417,28 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE) cut -f 2 -d " " | cut -f 1 -d "-" )) $(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH))) $(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH))) + $(eval CXX_TARGET_ARCH:=$(subst s390x,s390,$(CXX_TARGET_ARCH))) + $(eval CXX_TARGET_ARCH:=$(subst powerpc,ppc,$(CXX_TARGET_ARCH))) + $(eval CXX_TARGET_ARCH:=$(subst ppc64,ppc,$(CXX_TARGET_ARCH))) + $(eval CXX_TARGET_ARCH:=$(subst ppcle,ppc,$(CXX_TARGET_ARCH))) $(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@)))) - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \ + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \ GYP_GENERATORS=make \ - build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ - -Ibuild/standalone.gypi --depth=. \ + tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \ + -Igypfiles/standalone.gypi --depth=. 
\ -Dv8_target_arch=$(V8_TARGET_ARCH) \ $(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \ - -Dtarget_arch=$(V8_TARGET_ARCH),) \ + -Dtarget_arch=$(V8_TARGET_ARCH), \ + $(if $(shell echo $(ARCHES32) | grep $(V8_TARGET_ARCH)), \ + -Dtarget_arch=ia32,)) \ $(if $(findstring optdebug,$@),-Dv8_optimized_debug=1,) \ -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE) - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \ + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \ GYP_GENERATORS=make \ - build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ - -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS) - -# Note that NACL_SDK_ROOT must be set to point to an appropriate -# Native Client SDK before using this makefile. You can download -# an SDK here: -# https://developers.google.com/native-client/sdk/download -# The path indicated by NACL_SDK_ROOT will typically end with -# a folder for a pepper version such as "pepper_25" that should -# have "tools" and "toolchain" subdirectories. -must-set-NACL_SDK_ROOT: -ifndef NACL_SDK_ROOT - $(error NACL_SDK_ROOT must be set) -endif + tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \ + -Igypfiles/standalone.gypi --depth=. -S.native $(GYPFLAGS) # Replaces the old with the new environment file if they're different, which # will trigger GYP to regenerate Makefiles. @@ -497,11 +467,21 @@ gtags.files: $(GYPFILES) $(ENVFILE) # We need to manually set the stack limit here, to work around bugs in # gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems. -GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null) +# Using $(wildcard ...) gracefully ignores non-existing files, so that stale +# gtags.files after switching branches don't cause recipe failures. 
+GPATH GRTAGS GSYMS GTAGS: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null)) @bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<' gtags.clean: rm -f gtags.files GPATH GRTAGS GSYMS GTAGS +tags: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null)) + @(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \ + (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false) + ctags --fields=+l -L $< + +tags.clean: + rm -r tags + dependencies builddeps: $(error Use 'gclient sync' instead) diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android index c49cb85b9b6514..417152177d73c2 100644 --- a/deps/v8/Makefile.android +++ b/deps/v8/Makefile.android @@ -66,7 +66,7 @@ ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS)) $(ANDROID_MAKEFILES): GYP_GENERATORS=make-android \ GYP_DEFINES="${DEFINES}" \ - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \ - build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \ - -Ibuild/standalone.gypi --depth=. \ + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH)" \ + tools/gyp/gyp --generator-output="${OUTDIR}" gypfiles/all.gyp \ + -Igypfiles/standalone.gypi --depth=. \ -S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS} diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl deleted file mode 100644 index 3459c42c0d865c..00000000000000 --- a/deps/v8/Makefile.nacl +++ /dev/null @@ -1,97 +0,0 @@ -# -# Copyright 2013 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Those definitions should be consistent with the main Makefile -NACL_ARCHES = nacl_ia32 nacl_x64 -MODES = release debug - -# Generates all combinations of NACL ARCHES and MODES, -# e.g. 
"nacl_ia32.release" or "nacl_x64.release" -NACL_BUILDS = $(foreach mode,$(MODES), \ - $(addsuffix .$(mode),$(NACL_ARCHES))) - -HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/') -TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain) -NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/linux_pnacl - -ifeq ($(wildcard $(NACL_TOOLCHAIN)),) - $(error Cannot find Native Client toolchain in "${NACL_TOOLCHAIN}") -endif - -ifeq ($(ARCH), nacl_ia32) - GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32 - NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang" - NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++" - NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-32" -else - ifeq ($(ARCH), nacl_x64) - GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32 - NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang" - NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++" - NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-64" - else - $(error Target architecture "${ARCH}" is not supported) - endif -endif - -# For mksnapshot host generation. -GYPENV += host_os=${HOST_OS} - -# ICU doesn't support NaCl. -GYPENV += v8_enable_i18n_support=0 - -# Disable strict aliasing - v8 code often relies on undefined behavior of C++. -GYPENV += v8_no_strict_aliasing=1 - -NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS)) -.SECONDEXPANSION: -# For some reason the $$(basename $$@) expansion didn't work here... -$(NACL_BUILDS): $(NACL_MAKEFILES) - @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \ - CC=${NACL_CC} \ - CXX=${NACL_CXX} \ - AR="$(NACL_TOOLCHAIN)/bin/pnacl-ar" \ - RANLIB="$(NACL_TOOLCHAIN)/bin/pnacl-ranlib" \ - LD="$(NACL_TOOLCHAIN)/bin/pnacl-ld" \ - LINK=${NACL_LINK} \ - BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \ - python -c "print raw_input().capitalize()") \ - builddir="$(shell pwd)/$(OUTDIR)/$@" - -# NACL GYP file generation targets. 
-$(NACL_MAKEFILES): - GYP_GENERATORS=make \ - GYP_DEFINES="${GYPENV}" \ - CC=${NACL_CC} \ - CXX=${NACL_CXX} \ - LINK=${NACL_LINK} \ - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \ - build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \ - -Ibuild/standalone.gypi --depth=. \ - -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \ - -Dwno_array_bounds=-Wno-array-bounds diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index 3f2caecd498284..028f4ff12c5c89 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -1,6 +1,9 @@ adamk@chromium.org ahaas@chromium.org +bbudge@chromium.org +binji@chromium.org bmeurer@chromium.org +bradnelson@chromium.org cbruni@chromium.org danno@chromium.org epertoso@chromium.org @@ -15,10 +18,10 @@ machenbach@chromium.org marja@chromium.org mlippautz@chromium.org mstarzinger@chromium.org +mtrofin@chromium.org mvstanton@chromium.org mythria@chromium.org neis@chromium.org -oth@chromium.org rmcilroy@chromium.org rossberg@chromium.org titzer@chromium.org diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index f8516afc44ef50..78e7482efbab5d 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -216,6 +216,38 @@ def FilterFile(affected_file): return [] +def _CheckMissingFiles(input_api, output_api): + """Runs verify_source_deps.py to ensure no files were added that are not in + GN. + """ + # We need to wait until we have an input_api object and use this + # roundabout construct to import checkdeps because this file is + # eval-ed and thus doesn't have __file__. + original_sys_path = sys.path + try: + sys.path = sys.path + [input_api.os_path.join( + input_api.PresubmitLocalPath(), 'tools')] + from verify_source_deps import missing_gn_files, missing_gyp_files + finally: + # Restore sys.path to what it was before. 
+ sys.path = original_sys_path + + gn_files = missing_gn_files() + gyp_files = missing_gyp_files() + results = [] + if gn_files: + results.append(output_api.PresubmitError( + "You added one or more source files but didn't update the\n" + "corresponding BUILD.gn files:\n", + gn_files)) + if gyp_files: + results.append(output_api.PresubmitError( + "You added one or more source files but didn't update the\n" + "corresponding gyp files:\n", + gyp_files)) + return results + + def _CommonChecks(input_api, output_api): """Checks common to both upload and commit.""" results = [] @@ -223,12 +255,15 @@ def _CommonChecks(input_api, output_api): input_api, output_api, source_file_filter=None)) results.extend(input_api.canned_checks.CheckPatchFormatted( input_api, output_api)) + results.extend(input_api.canned_checks.CheckGenderNeutral( + input_api, output_api)) results.extend(_V8PresubmitChecks(input_api, output_api)) results.extend(_CheckUnwantedDependencies(input_api, output_api)) results.extend( _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api)) results.extend( _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api)) + results.extend(_CheckMissingFiles(input_api, output_api)) return results @@ -242,32 +277,15 @@ def _SkipTreeCheck(input_api, output_api): return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip' -def _CheckChangeLogFlag(input_api, output_api, warn): - """Checks usage of LOG= flag in the commit message.""" - results = [] - if (input_api.change.BUG and input_api.change.BUG != 'none' and - not 'LOG' in input_api.change.tags): - text = ('An issue reference (BUG=) requires a change log flag (LOG=). ' - 'Use LOG=Y for including this commit message in the change log. 
' - 'Use LOG=N or leave blank otherwise.') - if warn: - results.append(output_api.PresubmitPromptWarning(text)) - else: - results.append(output_api.PresubmitError(text)) - return results - - def CheckChangeOnUpload(input_api, output_api): results = [] results.extend(_CommonChecks(input_api, output_api)) - results.extend(_CheckChangeLogFlag(input_api, output_api, True)) return results def CheckChangeOnCommit(input_api, output_api): results = [] results.extend(_CommonChecks(input_api, output_api)) - results.extend(_CheckChangeLogFlag(input_api, output_api, False)) results.extend(input_api.canned_checks.CheckChangeHasDescription( input_api, output_api)) if not _SkipTreeCheck(input_api, output_api): diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS index 29b957b0917f00..40ce3d865dc2a0 100644 --- a/deps/v8/WATCHLISTS +++ b/deps/v8/WATCHLISTS @@ -33,9 +33,6 @@ { 'WATCHLIST_DEFINITIONS': { - 'public_api': { - 'filepath': 'include/', - }, 'snapshot': { 'filepath': 'src/snapshot/', }, @@ -44,7 +41,6 @@ }, 'interpreter': { 'filepath': 'src/interpreter/' \ - '|src/compiler/interpreter' \ '|src/compiler/bytecode' \ '|test/cctest/interpreter/' \ '|test/unittests/interpreter/', @@ -60,13 +56,16 @@ }, 'ia32': { 'filepath': '/ia32/', - } + }, + 'merges': { + 'filepath': '.', + }, + 'gypfiles': { + 'filepath': 'gypfiles/', + }, }, 'WATCHLISTS': { - 'public_api': [ - 'phajdan.jr@chromium.org', - ], 'snapshot': [ 'yangguo@chromium.org', ], @@ -75,7 +74,6 @@ ], 'interpreter': [ 'rmcilroy@chromium.org', - 'oth@chromium.org', ], 'feature_shipping_status': [ 'hablich@chromium.org', @@ -91,5 +89,12 @@ 'ia32': [ 'v8-x87-ports@googlegroups.com', ], + 'merges': [ + # Only enabled on branches created with tools/release/create_release.py + # 'v8-merges@googlegroups.com', + ], + 'gypfiles': [ + 'machenbach@chromium.org', + ], }, } diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index 7a1533ed822e88..87588f5749706b 
100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -308,8 +308,8 @@ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0) #define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name) -#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \ - TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName) +#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(category_and_name) \ + TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, category_and_name) // Records a single BEGIN event called "name" immediately, with 0, 1 or 2 // associated arguments. If the category is not enabled, then this @@ -395,6 +395,11 @@ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ arg2_name, arg2_val) +#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \ + TRACE_EVENT_FLAG_NONE) + #define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \ arg1_name, arg1_val) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ @@ -612,6 +617,13 @@ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ arg1_name, arg1_val) +#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \ + timestamp, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) #define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \ timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ @@ -701,6 +713,13 @@ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ 
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ arg1_name, arg1_val) +#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \ + timestamp, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) // NESTABLE_ASYNC_* APIs are used to describe an async operation, which can // be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC @@ -760,16 +779,19 @@ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) // Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately, -// with one associated argument. If the category is not enabled, then this -// does nothing. +// with none, one or two associated argument. If the category is not enabled, +// then this does nothing. +#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) + #define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \ arg1_name, arg1_val) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \ category_group, name, id, \ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) -// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately, -// with 2 associated arguments. If the category is not enabled, then this -// does nothing. 
+ #define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ @@ -814,15 +836,6 @@ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY) -// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately, -// with 2 associated arguments. If the category is not enabled, then this -// does nothing. -#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \ - category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \ - INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ - TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \ - TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) - // Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2 // associated arguments. If the category is not enabled, then this // does nothing. @@ -928,12 +941,8 @@ // Special trace event macro to trace task execution with the location where it // was posted from. -#define TRACE_TASK_EXECUTION(run_function, task) \ - TRACE_EVENT2("toplevel", run_function, "src_file", \ - (task).posted_from.file_name(), "src_func", \ - (task).posted_from.function_name()); \ - TRACE_EVENT_API_SCOPED_TASK_EXECUTION_EVENT INTERNAL_TRACE_EVENT_UID( \ - task_event)((task).posted_from.file_name()); +#define TRACE_TASK_EXECUTION(run_function, task) \ + INTERNAL_TRACE_TASK_EXECUTION(run_function, task) // TRACE_EVENT_METADATA* events are information related to other // injected events, not events in their own right. 
@@ -948,48 +957,58 @@ #define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \ - issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \ - "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue()) + issue_end_ts, TRACE_EVENT_FLAG_NONE, \ + "sync_id", sync_id, "issue_ts", issue_ts) // Macros to track the life time and value of arbitrary client objects. // See also TraceTrackableObject. #define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ - TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \ - TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE) + TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) #define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \ snapshot) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \ - TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot) + id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot) -#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \ - category_group, name, id, timestamp, snapshot) \ - INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ - TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \ - TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \ - TRACE_EVENT_FLAG_NONE, "snapshot", snapshot) +#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \ + category_group, name, id, timestamp, snapshot) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \ + id, TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + "snapshot", snapshot) #define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ - TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \ - TRACE_ID_DONT_MANGLE(id), 
TRACE_EVENT_FLAG_NONE) + TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) // Records entering and leaving trace event contexts. |category_group| and // |name| specify the context category and type. |context| is a // snapshotted context object id. -#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \ - INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ - TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \ - TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE) -#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \ - INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ - TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \ - TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, context, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \ + TRACE_EVENT_FLAG_NONE) #define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \ - INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \ - TRACE_ID_DONT_MANGLE(context)) + INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) + +// Macro to specify that two trace IDs are identical. For example, +// TRACE_BIND_IDS( +// "category", "name", +// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000), +// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000)) +// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from +// the current process have the same ID as events with ID +// ("blink::ResourceFetcher::FetchRequest", 0x2000). +#define TRACE_BIND_IDS(category_group, name, id, bind_id) \ + INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id); // Macro to efficiently determine if a given category group is enabled. 
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \ @@ -1056,11 +1075,13 @@ #define TRACE_EVENT_PHASE_CLOCK_SYNC ('c') #define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(') #define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')') +#define TRACE_EVENT_PHASE_BIND_IDS ('=') // Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT. #define TRACE_EVENT_FLAG_NONE (static_cast(0)) #define TRACE_EVENT_FLAG_COPY (static_cast(1 << 0)) #define TRACE_EVENT_FLAG_HAS_ID (static_cast(1 << 1)) +// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated. #define TRACE_EVENT_FLAG_MANGLE_ID (static_cast(1 << 2)) #define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast(1 << 3)) #define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast(1 << 4)) @@ -1071,6 +1092,8 @@ #define TRACE_EVENT_FLAG_FLOW_OUT (static_cast(1 << 9)) #define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast(1 << 10)) #define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast(1 << 11)) +#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast(1 << 12)) +#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast(1 << 13)) #define TRACE_EVENT_FLAG_SCOPE_MASK \ (static_cast(TRACE_EVENT_FLAG_SCOPE_OFFSET | \ diff --git a/deps/v8/build/config/win/msvs_dependencies.isolate b/deps/v8/build/config/win/msvs_dependencies.isolate deleted file mode 100644 index ff922273634a9f..00000000000000 --- a/deps/v8/build/config/win/msvs_dependencies.isolate +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2015 the V8 project authors. All rights reserved. -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -{ - 'conditions': [ - # Copy the VS runtime DLLs into the isolate so that they - # don't have to be preinstalled on the target machine. 
- # - # VS2013 runtimes - ['OS=="win" and msvs_version==2013 and component=="shared_library" and CONFIGURATION_NAME=="Debug"', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/x64/msvcp120d.dll', - '<(PRODUCT_DIR)/x64/msvcr120d.dll', - ], - }, - }], - ['OS=="win" and msvs_version==2013 and component=="shared_library" and CONFIGURATION_NAME=="Release"', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/x64/msvcp120.dll', - '<(PRODUCT_DIR)/x64/msvcr120.dll', - ], - }, - }], - ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/msvcp120d.dll', - '<(PRODUCT_DIR)/msvcr120d.dll', - ], - }, - }], - ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/msvcp120.dll', - '<(PRODUCT_DIR)/msvcr120.dll', - ], - }, - }], - # VS2015 runtimes - ['OS=="win" and msvs_version==2015 and component=="shared_library" and CONFIGURATION_NAME=="Debug"', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/x64/msvcp140d.dll', - '<(PRODUCT_DIR)/x64/vccorlib140d.dll', - ], - }, - }], - ['OS=="win" and msvs_version==2015 and component=="shared_library" and CONFIGURATION_NAME=="Release"', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/x64/msvcp140.dll', - '<(PRODUCT_DIR)/x64/vccorlib140.dll', - ], - }, - }], - ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/msvcp140d.dll', - '<(PRODUCT_DIR)/vccorlib140d.dll', - ], - }, - }], - ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', { - 'variables': { - 'files': [ - '<(PRODUCT_DIR)/msvcp140.dll', - '<(PRODUCT_DIR)/vccorlib140.dll', - ], - }, - }], - ], -} 
\ No newline at end of file diff --git a/deps/v8/build/has_valgrind.py b/deps/v8/build/has_valgrind.py deleted file mode 100755 index 83a848d50b3049..00000000000000 --- a/deps/v8/build/has_valgrind.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 the V8 project authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import os - -BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind') -LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86') -LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64') - - -def DoMain(_): - """Hook to be called from gyp without starting a separate python - interpreter.""" - return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR)) - - -if __name__ == '__main__': - print DoMain([]) diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni new file mode 100644 index 00000000000000..6b8a4ff21921a1 --- /dev/null +++ b/deps/v8/build_overrides/build.gni @@ -0,0 +1,26 @@ +# Copyright 2016 The V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +mac_sdk_min_build_override = "10.10" +mac_deployment_target_build_override = "10.7" + +# Variable that can be used to support multiple build scenarios, like having +# Chromium specific targets in a client project's GN file etc. +build_with_chromium = false + +# Uncomment these to specify a different NDK location and version in +# non-Chromium builds. +# default_android_ndk_root = "//third_party/android_tools/ndk" +# default_android_ndk_version = "r10e" + +# Some non-Chromium builds don't support building java targets. +enable_java_templates = false + +# Some non-Chromium builds don't use Chromium's third_party/binutils. 
+linux_use_bundled_binutils_override = true + +# Allows different projects to specify their own suppressions files. +asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc" +lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc" +tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc" diff --git a/deps/v8/build_overrides/gtest.gni b/deps/v8/build_overrides/gtest.gni new file mode 100644 index 00000000000000..54c16b149b4f47 --- /dev/null +++ b/deps/v8/build_overrides/gtest.gni @@ -0,0 +1,15 @@ +# Copyright 2016 The V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Exclude support for registering main function in multi-process tests. +gtest_include_multiprocess = false + +# Exclude support for platform-specific operations across unit tests. +gtest_include_platform_test = false + +# Exclude support for testing Objective C code on OS X and iOS. +gtest_include_objc_support = false + +# Exclude support for flushing coverage files on iOS. +gtest_include_ios_coverage = false diff --git a/deps/v8/build_overrides/v8.gni b/deps/v8/build_overrides/v8.gni new file mode 100644 index 00000000000000..09ea4570b02032 --- /dev/null +++ b/deps/v8/build_overrides/v8.gni @@ -0,0 +1,32 @@ +# Copyright 2015 The V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import("//build/config/features.gni") +import("//build/config/ui.gni") +import("//build/config/v8_target_cpu.gni") +import("//gni/v8.gni") + +if (is_android) { + import("//build/config/android/config.gni") +} + +if (((v8_current_cpu == "x86" || v8_current_cpu == "x64" || + v8_current_cpu == "x87") && (is_linux || is_mac)) || + (v8_current_cpu == "ppc64" && is_linux)) { + v8_enable_gdbjit_default = true +} + +v8_imminent_deprecation_warnings_default = true + +# Add simple extras solely for the purpose of the cctests. +v8_extra_library_files = [ "//test/cctest/test-extra.js" ] +v8_experimental_extra_library_files = + [ "//test/cctest/test-experimental-extra.js" ] + +declare_args() { + # Enable inspector. See include/v8-inspector.h. + v8_enable_inspector = false +} + +v8_enable_inspector_override = v8_enable_inspector diff --git a/deps/v8/gni/isolate.gni b/deps/v8/gni/isolate.gni new file mode 100644 index 00000000000000..93c828d2cdf0fb --- /dev/null +++ b/deps/v8/gni/isolate.gni @@ -0,0 +1,175 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/config/sanitizers/sanitizers.gni") +import("//third_party/icu/config.gni") +import("v8.gni") + +declare_args() { + # Sets the test isolation mode (noop|prepare|check). + v8_test_isolation_mode = "noop" +} + +template("v8_isolate_run") { + # Remember target name as within the action scope the target name will be + # different. + name = target_name + if (name != "" && invoker.isolate != "" && invoker.deps != [] && + v8_test_isolation_mode != "noop") { + action(name + "_run") { + testonly = true + + deps = invoker.deps + + script = "//tools/isolate_driver.py" + + sources = [ + invoker.isolate, + ] + + inputs = [ + # Files that are known to be involved in this step. 
+ "//tools/swarming_client/isolate.py", + "//tools/swarming_client/run_isolated.py", + ] + + if (v8_test_isolation_mode == "prepare") { + outputs = [ + "$root_out_dir/$name.isolated.gen.json", + ] + } else if (v8_test_isolation_mode == "check") { + outputs = [ + "$root_out_dir/$name.isolated", + "$root_out_dir/$name.isolated.state", + ] + } + + # Translate gn to gyp variables. + if (is_asan) { + asan = "1" + } else { + asan = "0" + } + if (is_msan) { + msan = "1" + } else { + msan = "0" + } + if (is_tsan) { + tsan = "1" + } else { + tsan = "0" + } + if (is_cfi) { + cfi_vptr = "1" + } else { + cfi_vptr = "0" + } + if (target_cpu == "x86") { + target_arch = "ia32" + } else { + target_arch = target_cpu + } + if (is_debug) { + configuration_name = "Debug" + } else { + configuration_name = "Release" + } + if (is_component_build) { + component = "shared_library" + } else { + component = "static_library" + } + if (icu_use_data_file) { + icu_use_data_file_flag = "1" + } else { + icu_use_data_file_flag = "0" + } + if (v8_use_external_startup_data) { + use_external_startup_data = "1" + } else { + use_external_startup_data = "0" + } + if (v8_use_snapshot) { + use_snapshot = "true" + } else { + use_snapshot = "false" + } + if (v8_has_valgrind) { + has_valgrind = "1" + } else { + has_valgrind = "0" + } + if (v8_gcmole) { + gcmole = "1" + } else { + gcmole = "0" + } + + + # Note, all paths will be rebased in isolate_driver.py to be relative to + # the isolate file. + args = [ + v8_test_isolation_mode, + "--isolated", + rebase_path("$root_out_dir/$name.isolated", root_build_dir), + "--isolate", + rebase_path(invoker.isolate, root_build_dir), + + # Path variables are used to replace file paths when loading a .isolate + # file + "--path-variable", + "DEPTH", + rebase_path("//", root_build_dir), + "--path-variable", + "PRODUCT_DIR", + rebase_path(root_out_dir, root_build_dir), + + # TODO(machenbach): Set variables for remaining features. 
+ "--config-variable", + "CONFIGURATION_NAME=$configuration_name", + "--config-variable", + "OS=$target_os", + "--config-variable", + "asan=$asan", + "--config-variable", + "cfi_vptr=$cfi_vptr", + "--config-variable", + "gcmole=$gcmole", + "--config-variable", + "has_valgrind=$has_valgrind", + "--config-variable", + "icu_use_data_file_flag=$icu_use_data_file_flag", + "--config-variable", + "msan=$msan", + "--config-variable", + "tsan=$tsan", + "--config-variable", + "coverage=0", + "--config-variable", + "sanitizer_coverage=0", + "--config-variable", + "component=$component", + "--config-variable", + "target_arch=$target_arch", + "--config-variable", + "v8_use_external_startup_data=$use_external_startup_data", + "--config-variable", + "v8_use_snapshot=$use_snapshot", + ] + + if (is_win) { + args += [ + "--config-variable", + "msvs_version=2013", + ] + } else { + args += [ + "--config-variable", + "msvs_version=0", + ] + } + } + } +} diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni new file mode 100644 index 00000000000000..7ff7f6fb89ac36 --- /dev/null +++ b/deps/v8/gni/v8.gni @@ -0,0 +1,108 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/config/sanitizers/sanitizers.gni") +import("//build/config/v8_target_cpu.gni") + +declare_args() { + # Indicate if valgrind was fetched as a custom deps to make it available on + # swarming. + v8_has_valgrind = false + + # Indicate if gcmole was fetched as a hook to make it available on swarming. + v8_gcmole = false + + # Turns on compiler optimizations in V8 in Debug build. + v8_optimized_debug = true + + # Support for backtrace_symbols on linux. + v8_enable_backtrace = "" + + # Enable the snapshot feature, for fast context creation. 
+ # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html + v8_use_snapshot = true + + # Use external files for startup data blobs: + # the JS builtins sources and the start snapshot. + v8_use_external_startup_data = "" +} + +if (v8_use_external_startup_data == "") { + # If not specified as a gn arg, use external startup data by default if + # a snapshot is used and if we're not on ios. + v8_use_external_startup_data = v8_use_snapshot && !is_ios +} + +if (v8_enable_backtrace == "") { + v8_enable_backtrace = is_debug && !v8_optimized_debug +} + +############################################################################### +# Templates +# + +# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute +# paths for all configs in templates as they are shared in different +# subdirectories. +v8_path_prefix = get_path_info("../", "abspath") + +# Common configs to remove or add in all v8 targets. +v8_remove_configs = [ "//build/config/compiler:chromium_code" ] +v8_add_configs = [ + "//build/config/compiler:no_chromium_code", + v8_path_prefix + ":features", + v8_path_prefix + ":toolchain", +] + +if (is_debug && !v8_optimized_debug) { + v8_remove_configs += [ "//build/config/compiler:default_optimization" ] + v8_add_configs += [ "//build/config/compiler:no_optimize" ] +} else { + v8_remove_configs += [ "//build/config/compiler:default_optimization" ] + + # TODO(crbug.com/621335) Rework this so that we don't have the confusion + # between "optimize_speed" and "optimize_max". + if (is_posix && !is_android && !using_sanitizer) { + v8_add_configs += [ "//build/config/compiler:optimize_speed" ] + } else { + v8_add_configs += [ "//build/config/compiler:optimize_max" ] + } +} + +if (is_posix && v8_enable_backtrace) { + v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ] + v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ] +} + +# All templates should be kept in sync. 
+template("v8_source_set") { + source_set(target_name) { + forward_variables_from(invoker, "*", [ "configs" ]) + configs += invoker.configs + configs -= v8_remove_configs + configs += v8_add_configs + } +} + +template("v8_executable") { + executable(target_name) { + forward_variables_from(invoker, "*", [ "configs" ]) + configs += invoker.configs + configs -= v8_remove_configs + configs += v8_add_configs + if (is_linux) { + # For enabling ASLR. + ldflags = [ "-pie" ] + } + } +} + +template("v8_component") { + component(target_name) { + forward_variables_from(invoker, "*", [ "configs" ]) + configs += invoker.configs + configs -= v8_remove_configs + configs += v8_add_configs + } +} diff --git a/deps/v8/build/OWNERS b/deps/v8/gypfiles/OWNERS similarity index 100% rename from deps/v8/build/OWNERS rename to deps/v8/gypfiles/OWNERS diff --git a/deps/v8/build/README.txt b/deps/v8/gypfiles/README.txt similarity index 100% rename from deps/v8/build/README.txt rename to deps/v8/gypfiles/README.txt diff --git a/deps/v8/build/all.gyp b/deps/v8/gypfiles/all.gyp similarity index 80% rename from deps/v8/build/all.gyp rename to deps/v8/gypfiles/all.gyp index feaf4fecccabf5..6b4ef82d6906c1 100644 --- a/deps/v8/build/all.gyp +++ b/deps/v8/gypfiles/all.gyp @@ -8,11 +8,7 @@ 'target_name': 'All', 'type': 'none', 'dependencies': [ - '../samples/samples.gyp:*', '../src/d8.gyp:d8', - '../test/cctest/cctest.gyp:*', - '../test/fuzzer/fuzzer.gyp:*', - '../test/unittests/unittests.gyp:*', ], 'conditions': [ ['component!="shared_library"', { @@ -20,12 +16,20 @@ '../tools/parser-shell.gyp:parser-shell', ], }], + # These items don't compile for Android on Mac. 
+ ['host_os!="mac" or OS!="android"', { + 'dependencies': [ + '../samples/samples.gyp:*', + '../test/cctest/cctest.gyp:*', + '../test/fuzzer/fuzzer.gyp:*', + '../test/unittests/unittests.gyp:*', + ], + }], ['test_isolation_mode != "noop"', { 'dependencies': [ '../test/bot_default.gyp:*', '../test/benchmarks/benchmarks.gyp:*', '../test/default.gyp:*', - '../test/ignition.gyp:*', '../test/intl/intl.gyp:*', '../test/message/message.gyp:*', '../test/mjsunit/mjsunit.gyp:*', diff --git a/deps/v8/build/coverage_wrapper.py b/deps/v8/gypfiles/coverage_wrapper.py similarity index 100% rename from deps/v8/build/coverage_wrapper.py rename to deps/v8/gypfiles/coverage_wrapper.py diff --git a/deps/v8/build/detect_v8_host_arch.py b/deps/v8/gypfiles/detect_v8_host_arch.py similarity index 100% rename from deps/v8/build/detect_v8_host_arch.py rename to deps/v8/gypfiles/detect_v8_host_arch.py diff --git a/deps/v8/build/download_gold_plugin.py b/deps/v8/gypfiles/download_gold_plugin.py similarity index 100% rename from deps/v8/build/download_gold_plugin.py rename to deps/v8/gypfiles/download_gold_plugin.py diff --git a/deps/v8/build/features.gypi b/deps/v8/gypfiles/features.gypi similarity index 99% rename from deps/v8/build/features.gypi rename to deps/v8/gypfiles/features.gypi index 5a21a63e324f49..f82a59f0f82f53 100644 --- a/deps/v8/build/features.gypi +++ b/deps/v8/gypfiles/features.gypi @@ -123,7 +123,7 @@ }, # Debug 'Release': { 'variables': { - 'v8_enable_handle_zapping%': 0, + 'v8_enable_handle_zapping%': 1, }, 'conditions': [ ['v8_enable_handle_zapping==1', { diff --git a/deps/v8/build/get_landmines.py b/deps/v8/gypfiles/get_landmines.py similarity index 95% rename from deps/v8/build/get_landmines.py rename to deps/v8/gypfiles/get_landmines.py index 2bbf7a61bfe290..9fcca4b968a888 100755 --- a/deps/v8/build/get_landmines.py +++ b/deps/v8/gypfiles/get_landmines.py @@ -27,6 +27,7 @@ def main(): print 'Switching to pinned msvs toolchain.' 
print 'Clobbering to hopefully resolve problem with mksnapshot' print 'Clobber after ICU roll.' + print 'Clobber after Android NDK update.' return 0 diff --git a/deps/v8/build/gyp_environment.py b/deps/v8/gypfiles/gyp_environment.py similarity index 90% rename from deps/v8/build/gyp_environment.py rename to deps/v8/gypfiles/gyp_environment.py index 7a4e6221482688..76ae841ffb6a08 100644 --- a/deps/v8/build/gyp_environment.py +++ b/deps/v8/gypfiles/gyp_environment.py @@ -31,6 +31,7 @@ def apply_gyp_environment(file_path=None): supported_vars = ( 'V8_GYP_FILE', 'V8_GYP_SYNTAX_CHECK', 'GYP_DEFINES', + 'GYP_GENERATORS', 'GYP_GENERATOR_FLAGS', 'GYP_GENERATOR_OUTPUT', ) for var in supported_vars: @@ -51,4 +52,9 @@ def set_environment(): # Update the environment based on v8.gyp_env gyp_env_path = os.path.join(os.path.dirname(V8_ROOT), 'v8.gyp_env') apply_gyp_environment(gyp_env_path) + + if not os.environ.get('GYP_GENERATORS'): + # Default to ninja on all platforms. + os.environ['GYP_GENERATORS'] = 'ninja' + vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs() diff --git a/deps/v8/build/gyp_v8 b/deps/v8/gypfiles/gyp_v8 similarity index 89% rename from deps/v8/build/gyp_v8 rename to deps/v8/gypfiles/gyp_v8 index 8813f2c12162ba..b8b5f742b1e27d 100755 --- a/deps/v8/build/gyp_v8 +++ b/deps/v8/gypfiles/gyp_v8 @@ -43,7 +43,7 @@ import vs_toolchain script_dir = os.path.dirname(os.path.realpath(__file__)) v8_root = os.path.abspath(os.path.join(script_dir, os.pardir)) -sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib')) +sys.path.insert(0, os.path.join(v8_root, 'tools', 'gyp', 'pylib')) import gyp # Add paths so that pymod_do_main(...) can import files. @@ -90,7 +90,7 @@ def additional_include_files(args=[]): result.append(path) # Always include standalone.gypi - AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi')) + AddInclude(os.path.join(v8_root, 'gypfiles', 'standalone.gypi')) # Optionally add supplemental .gypi files if present. 
supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi')) @@ -118,6 +118,22 @@ def run_gyp(args): if __name__ == '__main__': args = sys.argv[1:] + gyp_chromium_no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION') + if gyp_chromium_no_action == '1': + print 'Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.' + sys.exit(0) + + running_as_hook = '--running-as-hook' + if running_as_hook in args and gyp_chromium_no_action != '0': + print 'GYP is now disabled by default in runhooks.\n' + print 'If you really want to run this, either run ' + print '`python gypfiles/gyp_v8` explicitly by hand ' + print 'or set the environment variable GYP_CHROMIUM_NO_ACTION=0.' + sys.exit(0) + + if running_as_hook in args: + args.remove(running_as_hook) + gyp_environment.set_environment() # This could give false positives since it doesn't actually do real option diff --git a/deps/v8/build/gyp_v8.py b/deps/v8/gypfiles/gyp_v8.py similarity index 100% rename from deps/v8/build/gyp_v8.py rename to deps/v8/gypfiles/gyp_v8.py diff --git a/deps/v8/build/isolate.gypi b/deps/v8/gypfiles/isolate.gypi similarity index 92% rename from deps/v8/build/isolate.gypi rename to deps/v8/gypfiles/isolate.gypi index 4cfbbfddd100d7..149818c8d0636f 100644 --- a/deps/v8/build/isolate.gypi +++ b/deps/v8/gypfiles/isolate.gypi @@ -17,7 +17,7 @@ # 'foo_test', # ], # 'includes': [ -# '../build/isolate.gypi', +# '../gypfiles/isolate.gypi', # ], # 'sources': [ # 'foo_test.isolate', @@ -73,15 +73,13 @@ '--config-variable', 'cfi_vptr=<(cfi_vptr)', '--config-variable', 'gcmole=<(gcmole)', '--config-variable', 'has_valgrind=<(has_valgrind)', - '--config-variable', 'icu_use_data_file_flag=0', + '--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)', '--config-variable', 'msan=<(msan)', '--config-variable', 'tsan=<(tsan)', '--config-variable', 'coverage=<(coverage)', '--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)', '--config-variable', 'component=<(component)', 
'--config-variable', 'target_arch=<(target_arch)', - '--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)', - '--config-variable', 'v8_separate_ignition_snapshot=<(v8_separate_ignition_snapshot)', '--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)', '--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)', ], diff --git a/deps/v8/build/landmine_utils.py b/deps/v8/gypfiles/landmine_utils.py similarity index 100% rename from deps/v8/build/landmine_utils.py rename to deps/v8/gypfiles/landmine_utils.py diff --git a/deps/v8/build/landmines.py b/deps/v8/gypfiles/landmines.py similarity index 99% rename from deps/v8/build/landmines.py rename to deps/v8/gypfiles/landmines.py index 97c63901c1a904..2a81c66d1a6529 100755 --- a/deps/v8/build/landmines.py +++ b/deps/v8/gypfiles/landmines.py @@ -198,7 +198,7 @@ def process_options(): parser = optparse.OptionParser() parser.add_option( '-s', '--landmine-scripts', action='append', - default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')], + default=[os.path.join(SRC_DIR, 'gypfiles', 'get_landmines.py')], help='Path to the script which emits landmines to stdout. The target ' 'is passed to this script via option -t. Note that an extra ' 'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.') diff --git a/deps/v8/build/mac/asan.gyp b/deps/v8/gypfiles/mac/asan.gyp similarity index 100% rename from deps/v8/build/mac/asan.gyp rename to deps/v8/gypfiles/mac/asan.gyp diff --git a/deps/v8/gypfiles/set_clang_warning_flags.gypi b/deps/v8/gypfiles/set_clang_warning_flags.gypi new file mode 100644 index 00000000000000..63d5f1435ccb51 --- /dev/null +++ b/deps/v8/gypfiles/set_clang_warning_flags.gypi @@ -0,0 +1,59 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Copyright (c) 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +# This file is meant to be included to set clang-specific compiler flags. +# To use this the following variable can be defined: +# clang_warning_flags: list: Compiler flags to pass to clang. +# clang_warning_flags_unset: list: Compiler flags to not pass to clang. +# +# Only use this in third-party code. In chromium_code, fix your code to not +# warn instead! +# +# Note that the gypi file is included in target_defaults, so it does not need +# to be explicitly included. +# +# Warning flags set by this will be used on all platforms. If you want to set +# warning flags on only some platforms, you have to do so manually. +# +# To use this, create a gyp target with the following form: +# { +# 'target_name': 'my_target', +# 'variables': { +# 'clang_warning_flags': ['-Wno-awesome-warning'], +# 'clang_warning_flags_unset': ['-Wpreviously-set-flag'], +# } +# } + +{ + 'variables': { + 'clang_warning_flags_unset%': [], # Provide a default value. + }, + 'conditions': [ + ['clang==1', { + # This uses >@ instead of @< to also see clang_warning_flags set in + # targets directly, not just the clang_warning_flags in target_defaults. 
+ 'cflags': [ '>@(clang_warning_flags)' ], + 'cflags!': [ '>@(clang_warning_flags_unset)' ], + 'xcode_settings': { + 'WARNING_CFLAGS': ['>@(clang_warning_flags)'], + 'WARNING_CFLAGS!': ['>@(clang_warning_flags_unset)'], + }, + 'msvs_settings': { + 'VCCLCompilerTool': { + 'AdditionalOptions': [ '>@(clang_warning_flags)' ], + 'AdditionalOptions!': [ '>@(clang_warning_flags_unset)' ], + }, + }, + }], + ['clang==0 and host_clang==1', { + 'target_conditions': [ + ['_toolset=="host"', { + 'cflags': [ '>@(clang_warning_flags)' ], + 'cflags!': [ '>@(clang_warning_flags_unset)' ], + }], + ], + }], + ], +} diff --git a/deps/v8/build/shim_headers.gypi b/deps/v8/gypfiles/shim_headers.gypi similarity index 100% rename from deps/v8/build/shim_headers.gypi rename to deps/v8/gypfiles/shim_headers.gypi diff --git a/deps/v8/build/standalone.gypi b/deps/v8/gypfiles/standalone.gypi similarity index 85% rename from deps/v8/build/standalone.gypi rename to deps/v8/gypfiles/standalone.gypi index 6c88409dbee892..7e41ce84aef96d 100644 --- a/deps/v8/build/standalone.gypi +++ b/deps/v8/gypfiles/standalone.gypi @@ -46,38 +46,74 @@ 'msvs_multi_core_compile%': '1', 'mac_deployment_target%': '10.7', 'release_extra_cflags%': '', + 'v8_enable_inspector%': 0, 'variables': { 'variables': { 'variables': { - 'conditions': [ - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \ - OS=="netbsd" or OS=="mac" or OS=="qnx" or OS=="aix"', { - # This handles the Unix platforms we generally deal with. - # Anything else gets passed through, which probably won't work - # very well; such hosts should pass an explicit target_arch - # to gyp. 
- 'host_arch%': ' +#include +#include + +namespace v8 { +namespace platform { +namespace tracing { + +const int kTraceMaxNumArgs = 2; + +class TraceObject { + public: + union ArgValue { + bool as_bool; + uint64_t as_uint; + int64_t as_int; + double as_double; + const void* as_pointer; + const char* as_string; + }; + + TraceObject() {} + ~TraceObject(); + void Initialize(char phase, const uint8_t* category_enabled_flag, + const char* name, const char* scope, uint64_t id, + uint64_t bind_id, int num_args, const char** arg_names, + const uint8_t* arg_types, const uint64_t* arg_values, + unsigned int flags); + void UpdateDuration(); + void InitializeForTesting(char phase, const uint8_t* category_enabled_flag, + const char* name, const char* scope, uint64_t id, + uint64_t bind_id, int num_args, + const char** arg_names, const uint8_t* arg_types, + const uint64_t* arg_values, unsigned int flags, + int pid, int tid, int64_t ts, int64_t tts, + uint64_t duration, uint64_t cpu_duration); + + int pid() const { return pid_; } + int tid() const { return tid_; } + char phase() const { return phase_; } + const uint8_t* category_enabled_flag() const { + return category_enabled_flag_; + } + const char* name() const { return name_; } + const char* scope() const { return scope_; } + uint64_t id() const { return id_; } + uint64_t bind_id() const { return bind_id_; } + int num_args() const { return num_args_; } + const char** arg_names() { return arg_names_; } + uint8_t* arg_types() { return arg_types_; } + ArgValue* arg_values() { return arg_values_; } + unsigned int flags() const { return flags_; } + int64_t ts() { return ts_; } + int64_t tts() { return tts_; } + uint64_t duration() { return duration_; } + uint64_t cpu_duration() { return cpu_duration_; } + + private: + int pid_; + int tid_; + char phase_; + const char* name_; + const char* scope_; + const uint8_t* category_enabled_flag_; + uint64_t id_; + uint64_t bind_id_; + int num_args_; + const char* 
arg_names_[kTraceMaxNumArgs]; + uint8_t arg_types_[kTraceMaxNumArgs]; + ArgValue arg_values_[kTraceMaxNumArgs]; + char* parameter_copy_storage_ = nullptr; + unsigned int flags_; + int64_t ts_; + int64_t tts_; + uint64_t duration_; + uint64_t cpu_duration_; + + // Disallow copy and assign + TraceObject(const TraceObject&) = delete; + void operator=(const TraceObject&) = delete; +}; + +class TraceWriter { + public: + TraceWriter() {} + virtual ~TraceWriter() {} + virtual void AppendTraceEvent(TraceObject* trace_event) = 0; + virtual void Flush() = 0; + + static TraceWriter* CreateJSONTraceWriter(std::ostream& stream); + + private: + // Disallow copy and assign + TraceWriter(const TraceWriter&) = delete; + void operator=(const TraceWriter&) = delete; +}; + +class TraceBufferChunk { + public: + explicit TraceBufferChunk(uint32_t seq); + + void Reset(uint32_t new_seq); + bool IsFull() const { return next_free_ == kChunkSize; } + TraceObject* AddTraceEvent(size_t* event_index); + TraceObject* GetEventAt(size_t index) { return &chunk_[index]; } + + uint32_t seq() const { return seq_; } + size_t size() const { return next_free_; } + + static const size_t kChunkSize = 64; + + private: + size_t next_free_ = 0; + TraceObject chunk_[kChunkSize]; + uint32_t seq_; + + // Disallow copy and assign + TraceBufferChunk(const TraceBufferChunk&) = delete; + void operator=(const TraceBufferChunk&) = delete; +}; + +class TraceBuffer { + public: + TraceBuffer() {} + virtual ~TraceBuffer() {} + + virtual TraceObject* AddTraceEvent(uint64_t* handle) = 0; + virtual TraceObject* GetEventByHandle(uint64_t handle) = 0; + virtual bool Flush() = 0; + + static const size_t kRingBufferChunks = 1024; + + static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks, + TraceWriter* trace_writer); + + private: + // Disallow copy and assign + TraceBuffer(const TraceBuffer&) = delete; + void operator=(const TraceBuffer&) = delete; +}; + +// Options determines how the trace buffer stores data. 
+enum TraceRecordMode { + // Record until the trace buffer is full. + RECORD_UNTIL_FULL, + + // Record until the user ends the trace. The trace buffer is a fixed size + // and we use it as a ring buffer during recording. + RECORD_CONTINUOUSLY, + + // Record until the trace buffer is full, but with a huge buffer size. + RECORD_AS_MUCH_AS_POSSIBLE, + + // Echo to console. Events are discarded. + ECHO_TO_CONSOLE, +}; + +class TraceConfig { + public: + typedef std::vector StringList; + + static TraceConfig* CreateDefaultTraceConfig(); + + TraceConfig() + : enable_sampling_(false), + enable_systrace_(false), + enable_argument_filter_(false) {} + TraceRecordMode GetTraceRecordMode() const { return record_mode_; } + bool IsSamplingEnabled() const { return enable_sampling_; } + bool IsSystraceEnabled() const { return enable_systrace_; } + bool IsArgumentFilterEnabled() const { return enable_argument_filter_; } + + void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; } + void EnableSampling() { enable_sampling_ = true; } + void EnableSystrace() { enable_systrace_ = true; } + void EnableArgumentFilter() { enable_argument_filter_ = true; } + + void AddIncludedCategory(const char* included_category); + void AddExcludedCategory(const char* excluded_category); + + bool IsCategoryGroupEnabled(const char* category_group) const; + + private: + TraceRecordMode record_mode_; + bool enable_sampling_ : 1; + bool enable_systrace_ : 1; + bool enable_argument_filter_ : 1; + StringList included_categories_; + StringList excluded_categories_; + + // Disallow copy and assign + TraceConfig(const TraceConfig&) = delete; + void operator=(const TraceConfig&) = delete; +}; + +class TracingController { + public: + enum Mode { DISABLED = 0, RECORDING_MODE }; + + // The pointer returned from GetCategoryGroupEnabledInternal() points to a + // value with zero or more of the following bits. Used in this class only. + // The TRACE_EVENT macros should only use the value as a bool. 
+ // These values must be in sync with macro values in TraceEvent.h in Blink. + enum CategoryGroupEnabledFlags { + // Category group enabled for the recording mode. + ENABLED_FOR_RECORDING = 1 << 0, + // Category group enabled by SetEventCallbackEnabled(). + ENABLED_FOR_EVENT_CALLBACK = 1 << 2, + // Category group enabled to export events to ETW. + ENABLED_FOR_ETW_EXPORT = 1 << 3 + }; + + TracingController() {} + void Initialize(TraceBuffer* trace_buffer); + const uint8_t* GetCategoryGroupEnabled(const char* category_group); + static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag); + uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag, + const char* name, const char* scope, uint64_t id, + uint64_t bind_id, int32_t num_args, + const char** arg_names, const uint8_t* arg_types, + const uint64_t* arg_values, unsigned int flags); + void UpdateTraceEventDuration(const uint8_t* category_enabled_flag, + const char* name, uint64_t handle); + + void StartTracing(TraceConfig* trace_config); + void StopTracing(); + + private: + const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group); + void UpdateCategoryGroupEnabledFlag(size_t category_index); + void UpdateCategoryGroupEnabledFlags(); + + std::unique_ptr trace_buffer_; + std::unique_ptr trace_config_; + Mode mode_ = DISABLED; + + // Disallow copy and assign + TracingController(const TracingController&) = delete; + void operator=(const TracingController&) = delete; +}; + +} // namespace tracing +} // namespace platform +} // namespace v8 + +#endif // V8_LIBPLATFORM_V8_TRACING_H_ diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h index 50314501e3b3b9..6385a31d85e80f 100644 --- a/deps/v8/include/v8-debug.h +++ b/deps/v8/include/v8-debug.h @@ -18,13 +18,11 @@ enum DebugEvent { Exception = 2, NewFunction = 3, BeforeCompile = 4, - AfterCompile = 5, + AfterCompile = 5, CompileError = 6, - PromiseEvent = 7, - AsyncTaskEvent = 8, + AsyncTaskEvent = 7, }; - 
class V8_EXPORT Debug { public: /** @@ -127,6 +125,8 @@ class V8_EXPORT Debug { */ virtual ClientData* GetClientData() const = 0; + virtual Isolate* GetIsolate() const = 0; + virtual ~EventDetails() {} }; @@ -157,9 +157,6 @@ class V8_EXPORT Debug { static bool SetDebugEventListener(Isolate* isolate, EventCallback that, Local data = Local()); - V8_DEPRECATED("Use version with an Isolate", - static bool SetDebugEventListener( - EventCallback that, Local data = Local())); // Schedule a debugger break to happen when JavaScript code is run // in the given isolate. @@ -174,8 +171,6 @@ class V8_EXPORT Debug { // Message based interface. The message protocol is JSON. static void SetMessageHandler(Isolate* isolate, MessageHandler handler); - V8_DEPRECATED("Use version with an Isolate", - static void SetMessageHandler(MessageHandler handler)); static void SendCommand(Isolate* isolate, const uint16_t* command, int length, @@ -199,9 +194,6 @@ class V8_EXPORT Debug { * } * \endcode */ - static V8_DEPRECATED("Use maybe version", - Local Call(v8::Local fun, - Local data = Local())); // TODO(dcarney): data arg should be a MaybeLocal static MaybeLocal Call(Local context, v8::Local fun, @@ -210,8 +202,6 @@ class V8_EXPORT Debug { /** * Returns a mirror object for the given object. */ - static V8_DEPRECATED("Use maybe version", - Local GetMirror(v8::Local obj)); static MaybeLocal GetMirror(Local context, v8::Local obj); @@ -247,8 +237,6 @@ class V8_EXPORT Debug { * of this method. */ static void ProcessDebugMessages(Isolate* isolate); - V8_DEPRECATED("Use version with an Isolate", - static void ProcessDebugMessages()); /** * Debugger is running in its own context which is entered while debugger @@ -258,9 +246,12 @@ class V8_EXPORT Debug { * least one DebugEventListener or MessageHandler is set. 
*/ static Local GetDebugContext(Isolate* isolate); - V8_DEPRECATED("Use version with an Isolate", - static Local GetDebugContext()); + /** + * While in the debug context, this method returns the top-most non-debug + * context, if it exists. + */ + static MaybeLocal GetDebuggedContext(Isolate* isolate); /** * Enable/disable LiveEdit functionality for the given Isolate diff --git a/deps/v8/include/v8-experimental.h b/deps/v8/include/v8-experimental.h index 294ba647f03df9..1773345e09add4 100644 --- a/deps/v8/include/v8-experimental.h +++ b/deps/v8/include/v8-experimental.h @@ -31,13 +31,17 @@ class V8_EXPORT FastAccessorBuilder { ValueId IntegerConstant(int int_constant); ValueId GetReceiver(); ValueId LoadInternalField(ValueId value_id, int field_no); + ValueId LoadInternalFieldUnchecked(ValueId value_id, int field_no); ValueId LoadValue(ValueId value_id, int offset); ValueId LoadObject(ValueId value_id, int offset); + ValueId ToSmi(ValueId value_id); + void ReturnValue(ValueId value_id); void CheckFlagSetOrReturnNull(ValueId value_id, int mask); void CheckNotZeroOrReturnNull(ValueId value_id); LabelId MakeLabel(); void SetLabel(LabelId label_id); + void Goto(LabelId label_id); void CheckNotZeroOrJump(ValueId value_id, LabelId label_id); ValueId Call(v8::FunctionCallback callback, ValueId value_id); diff --git a/deps/v8/include/v8-inspector-protocol.h b/deps/v8/include/v8-inspector-protocol.h new file mode 100644 index 00000000000000..612a2ebc3911f5 --- /dev/null +++ b/deps/v8/include/v8-inspector-protocol.h @@ -0,0 +1,13 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_V8_INSPECTOR_PROTOCOL_H_ +#define V8_V8_INSPECTOR_PROTOCOL_H_ + +#include "inspector/Debugger.h" // NOLINT(build/include) +#include "inspector/Runtime.h" // NOLINT(build/include) +#include "inspector/Schema.h" // NOLINT(build/include) +#include "v8-inspector.h" // NOLINT(build/include) + +#endif // V8_V8_INSPECTOR_PROTOCOL_H_ diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h new file mode 100644 index 00000000000000..0855ac101b74ef --- /dev/null +++ b/deps/v8/include/v8-inspector.h @@ -0,0 +1,267 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_V8_INSPECTOR_H_ +#define V8_V8_INSPECTOR_H_ + +#include +#include + +#include + +#include "v8.h" // NOLINT(build/include) + +namespace v8_inspector { + +namespace protocol { +namespace Debugger { +namespace API { +class SearchMatch; +} +} +namespace Runtime { +namespace API { +class RemoteObject; +class StackTrace; +} +} +namespace Schema { +namespace API { +class Domain; +} +} +} // namespace protocol + +class V8_EXPORT StringView { + public: + StringView() : m_is8Bit(true), m_length(0), m_characters8(nullptr) {} + + StringView(const uint8_t* characters, size_t length) + : m_is8Bit(true), m_length(length), m_characters8(characters) {} + + StringView(const uint16_t* characters, size_t length) + : m_is8Bit(false), m_length(length), m_characters16(characters) {} + + bool is8Bit() const { return m_is8Bit; } + size_t length() const { return m_length; } + + // TODO(dgozman): add DCHECK(m_is8Bit) to accessors once platform can be used + // here. 
+ const uint8_t* characters8() const { return m_characters8; } + const uint16_t* characters16() const { return m_characters16; } + + private: + bool m_is8Bit; + size_t m_length; + union { + const uint8_t* m_characters8; + const uint16_t* m_characters16; + }; +}; + +class V8_EXPORT StringBuffer { + public: + virtual ~StringBuffer() {} + virtual const StringView& string() = 0; + // This method copies contents. + static std::unique_ptr create(const StringView&); +}; + +class V8_EXPORT V8ContextInfo { + public: + V8ContextInfo(v8::Local context, int contextGroupId, + const StringView& humanReadableName) + : context(context), + contextGroupId(contextGroupId), + humanReadableName(humanReadableName), + hasMemoryOnConsole(false) {} + + v8::Local context; + // Each v8::Context is a part of a group. The group id must be non-zero. + int contextGroupId; + StringView humanReadableName; + StringView origin; + StringView auxData; + bool hasMemoryOnConsole; + + private: + // Disallow copying and allocating this one. + enum NotNullTagEnum { NotNullLiteral }; + void* operator new(size_t) = delete; + void* operator new(size_t, NotNullTagEnum, void*) = delete; + void* operator new(size_t, void*) = delete; + V8ContextInfo(const V8ContextInfo&) = delete; + V8ContextInfo& operator=(const V8ContextInfo&) = delete; +}; + +class V8_EXPORT V8StackTrace { + public: + virtual bool isEmpty() const = 0; + virtual StringView topSourceURL() const = 0; + virtual int topLineNumber() const = 0; + virtual int topColumnNumber() const = 0; + virtual StringView topScriptId() const = 0; + virtual StringView topFunctionName() const = 0; + + virtual ~V8StackTrace() {} + virtual std::unique_ptr + buildInspectorObject() const = 0; + virtual std::unique_ptr toString() const = 0; + + // Safe to pass between threads, drops async chain. 
+ virtual std::unique_ptr clone() = 0; +}; + +class V8_EXPORT V8InspectorSession { + public: + virtual ~V8InspectorSession() {} + + // Cross-context inspectable values (DOM nodes in different worlds, etc.). + class V8_EXPORT Inspectable { + public: + virtual v8::Local get(v8::Local) = 0; + virtual ~Inspectable() {} + }; + virtual void addInspectedObject(std::unique_ptr) = 0; + + // Dispatching protocol messages. + static bool canDispatchMethod(const StringView& method); + virtual void dispatchProtocolMessage(const StringView& message) = 0; + virtual std::unique_ptr stateJSON() = 0; + virtual std::vector> + supportedDomains() = 0; + + // Debugger actions. + virtual void schedulePauseOnNextStatement(const StringView& breakReason, + const StringView& breakDetails) = 0; + virtual void cancelPauseOnNextStatement() = 0; + virtual void breakProgram(const StringView& breakReason, + const StringView& breakDetails) = 0; + virtual void setSkipAllPauses(bool) = 0; + virtual void resume() = 0; + virtual void stepOver() = 0; + virtual std::vector> + searchInTextByLines(const StringView& text, const StringView& query, + bool caseSensitive, bool isRegex) = 0; + + // Remote objects. 
+ virtual std::unique_ptr wrapObject( + v8::Local, v8::Local, + const StringView& groupName) = 0; + virtual bool unwrapObject(std::unique_ptr* error, + const StringView& objectId, v8::Local*, + v8::Local*, + std::unique_ptr* objectGroup) = 0; + virtual void releaseObjectGroup(const StringView&) = 0; +}; + +enum class V8ConsoleAPIType { kClear, kDebug, kLog, kInfo, kWarning, kError }; + +class V8_EXPORT V8InspectorClient { + public: + virtual ~V8InspectorClient() {} + + virtual void runMessageLoopOnPause(int contextGroupId) {} + virtual void quitMessageLoopOnPause() {} + virtual void runIfWaitingForDebugger(int contextGroupId) {} + + virtual void muteMetrics(int contextGroupId) {} + virtual void unmuteMetrics(int contextGroupId) {} + + virtual void beginUserGesture() {} + virtual void endUserGesture() {} + + virtual std::unique_ptr valueSubtype(v8::Local) { + return nullptr; + } + virtual bool formatAccessorsAsProperties(v8::Local) { + return false; + } + virtual bool isInspectableHeapObject(v8::Local) { return true; } + + virtual v8::Local ensureDefaultContextInGroup( + int contextGroupId) { + return v8::Local(); + } + virtual void beginEnsureAllContextsInGroup(int contextGroupId) {} + virtual void endEnsureAllContextsInGroup(int contextGroupId) {} + + virtual void installAdditionalCommandLineAPI(v8::Local, + v8::Local) {} + virtual void consoleAPIMessage(int contextGroupId, V8ConsoleAPIType, + const StringView& message, + const StringView& url, unsigned lineNumber, + unsigned columnNumber, V8StackTrace*) {} + virtual v8::MaybeLocal memoryInfo(v8::Isolate*, + v8::Local) { + return v8::MaybeLocal(); + } + + virtual void consoleTime(const StringView& title) {} + virtual void consoleTimeEnd(const StringView& title) {} + virtual void consoleTimeStamp(const StringView& title) {} + virtual double currentTimeMS() { return 0; } + typedef void (*TimerCallback)(void*); + virtual void startRepeatingTimer(double, TimerCallback, void* data) {} + virtual void cancelTimer(void* 
data) {} + + // TODO(dgozman): this was added to support service worker shadow page. We + // should not connect at all. + virtual bool canExecuteScripts(int contextGroupId) { return true; } +}; + +class V8_EXPORT V8Inspector { + public: + static std::unique_ptr create(v8::Isolate*, V8InspectorClient*); + virtual ~V8Inspector() {} + + // Contexts instrumentation. + virtual void contextCreated(const V8ContextInfo&) = 0; + virtual void contextDestroyed(v8::Local) = 0; + virtual void resetContextGroup(int contextGroupId) = 0; + + // Various instrumentation. + virtual void willExecuteScript(v8::Local, int scriptId) = 0; + virtual void didExecuteScript(v8::Local) = 0; + virtual void idleStarted() = 0; + virtual void idleFinished() = 0; + + // Async stack traces instrumentation. + virtual void asyncTaskScheduled(const StringView& taskName, void* task, + bool recurring) = 0; + virtual void asyncTaskCanceled(void* task) = 0; + virtual void asyncTaskStarted(void* task) = 0; + virtual void asyncTaskFinished(void* task) = 0; + virtual void allAsyncTasksCanceled() = 0; + + // Exceptions instrumentation. + virtual unsigned exceptionThrown( + v8::Local, const StringView& message, + v8::Local exception, const StringView& detailedMessage, + const StringView& url, unsigned lineNumber, unsigned columnNumber, + std::unique_ptr, int scriptId) = 0; + virtual void exceptionRevoked(v8::Local, unsigned exceptionId, + const StringView& message) = 0; + + // Connection. + class V8_EXPORT Channel { + public: + virtual ~Channel() {} + virtual void sendProtocolResponse(int callId, + const StringView& message) = 0; + virtual void sendProtocolNotification(const StringView& message) = 0; + virtual void flushProtocolNotifications() = 0; + }; + virtual std::unique_ptr connect( + int contextGroupId, Channel*, const StringView& state) = 0; + + // API methods. 
+ virtual std::unique_ptr createStackTrace( + v8::Local) = 0; + virtual std::unique_ptr captureStackTrace(bool fullStack) = 0; +}; + +} // namespace v8_inspector + +#endif // V8_V8_INSPECTOR_H_ diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index 11f8d51f02573e..d0b7aed8fc5a33 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -152,9 +152,9 @@ class Platform { */ virtual uint64_t AddTraceEvent( char phase, const uint8_t* category_enabled_flag, const char* name, - uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names, - const uint8_t* arg_types, const uint64_t* arg_values, - unsigned int flags) { + const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args, + const char** arg_names, const uint8_t* arg_types, + const uint64_t* arg_values, unsigned int flags) { return 0; } @@ -164,6 +164,19 @@ class Platform { **/ virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {} + + class TraceStateObserver { + public: + virtual ~TraceStateObserver() = default; + virtual void OnTraceEnabled() = 0; + virtual void OnTraceDisabled() = 0; + }; + + /** Adds tracing state change observer. */ + virtual void AddTraceStateObserver(TraceStateObserver*) {} + + /** Removes tracing state change observer. */ + virtual void RemoveTraceStateObserver(TraceStateObserver*) {} }; } // namespace v8 diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index 007ae2eca55e41..55c8386bdeff4a 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -46,6 +46,75 @@ template class V8_EXPORT std::vector; namespace v8 { +// TickSample captures the information collected for each sample. +struct TickSample { + // Internal profiling (with --prof + tools/$OS-tick-processor) wants to + // include the runtime function we're calling. Externally exposed tick + // samples don't care. 
+ enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame }; + + TickSample() + : state(OTHER), + pc(nullptr), + external_callback_entry(nullptr), + frames_count(0), + has_external_callback(false), + update_stats(true) {} + + /** + * Initialize a tick sample from the isolate. + * \param isolate The isolate. + * \param state Execution state. + * \param record_c_entry_frame Include or skip the runtime function. + * \param update_stats Whether update the sample to the aggregated stats. + * \param use_simulator_reg_state When set to true and V8 is running under a + * simulator, the method will use the simulator + * register state rather than the one provided + * with |state| argument. Otherwise the method + * will use provided register |state| as is. + */ + void Init(Isolate* isolate, const v8::RegisterState& state, + RecordCEntryFrame record_c_entry_frame, bool update_stats, + bool use_simulator_reg_state = true); + /** + * Get a call stack sample from the isolate. + * \param isolate The isolate. + * \param state Register state. + * \param record_c_entry_frame Include or skip the runtime function. + * \param frames Caller allocated buffer to store stack frames. + * \param frames_limit Maximum number of frames to capture. The buffer must + * be large enough to hold the number of frames. + * \param sample_info The sample info is filled up by the function + * provides number of actual captured stack frames and + * the current VM state. + * \param use_simulator_reg_state When set to true and V8 is running under a + * simulator, the method will use the simulator + * register state rather than the one provided + * with |state| argument. Otherwise the method + * will use provided register |state| as is. + * \note GetStackSample is thread and signal safe and should only be called + * when the JS thread is paused or interrupted. + * Otherwise the behavior is undefined. 
+ */ + static bool GetStackSample(Isolate* isolate, v8::RegisterState* state, + RecordCEntryFrame record_c_entry_frame, + void** frames, size_t frames_limit, + v8::SampleInfo* sample_info, + bool use_simulator_reg_state = true); + StateTag state; // The state of the VM. + void* pc; // Instruction pointer. + union { + void* tos; // Top stack value (*sp). + void* external_callback_entry; + }; + static const unsigned kMaxFramesCountLog2 = 8; + static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1; + void* stack[kMaxFramesCount]; // Call stack. + unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames. + bool has_external_callback : 1; + bool update_stats : 1; // Whether the sample should update aggregated stats. +}; + /** * CpuProfileNode represents a node in a call graph. */ @@ -62,12 +131,26 @@ class V8_EXPORT CpuProfileNode { /** Returns function name (empty string for anonymous functions.) */ Local GetFunctionName() const; + /** + * Returns function name (empty string for anonymous functions.) + * The string ownership is *not* passed to the caller. It stays valid until + * profile is deleted. The function is thread safe. + */ + const char* GetFunctionNameStr() const; + /** Returns id of the script where function is located. */ int GetScriptId() const; /** Returns resource name for script from where the function originates. */ Local GetScriptResourceName() const; + /** + * Returns resource name for script from where the function originates. + * The string ownership is *not* passed to the caller. It stays valid until + * profile is deleted. The function is thread safe. + */ + const char* GetScriptResourceNameStr() const; + /** * Returns the number, 1-based, of the line where the function originates. * kNoLineNumberInfo if no line number information is available. @@ -103,7 +186,9 @@ class V8_EXPORT CpuProfileNode { unsigned GetHitCount() const; /** Returns function entry UID. 
*/ - unsigned GetCallUid() const; + V8_DEPRECATE_SOON( + "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.", + unsigned GetCallUid() const); /** Returns id of the node. The id is unique within the tree */ unsigned GetNodeId() const; @@ -173,13 +258,24 @@ class V8_EXPORT CpuProfile { void Delete(); }; - /** * Interface for controlling CPU profiling. Instance of the - * profiler can be retrieved using v8::Isolate::GetCpuProfiler. + * profiler can be created using v8::CpuProfiler::New method. */ class V8_EXPORT CpuProfiler { public: + /** + * Creates a new CPU profiler for the |isolate|. The isolate must be + * initialized. The profiler object must be disposed after use by calling + * |Dispose| method. + */ + static CpuProfiler* New(Isolate* isolate); + + /** + * Disposes the CPU profiler object. + */ + void Dispose(); + /** * Changes default CPU profiler sampling interval to the specified number * of microseconds. Default interval is 1000us. This method must be called @@ -515,6 +611,11 @@ class V8_EXPORT AllocationProfile { */ class V8_EXPORT HeapProfiler { public: + enum SamplingFlags { + kSamplingNoFlags = 0, + kSamplingForceGC = 1 << 0, + }; + /** * Callback function invoked for obtaining RetainedObjectInfo for * the given JavaScript wrapper object. It is prohibited to enter V8 @@ -640,7 +741,8 @@ class V8_EXPORT HeapProfiler { * Returns false if a sampling heap profiler is already running. */ bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024, - int stack_depth = 16); + int stack_depth = 16, + SamplingFlags flags = kSamplingNoFlags); /** * Stops the sampling heap profile and discards the current profile. @@ -688,7 +790,6 @@ class V8_EXPORT HeapProfiler { HeapProfiler& operator=(const HeapProfiler&); }; - /** * Interface for providing information about embedder's objects * held by global handles. 
This information is reported in two ways: @@ -703,7 +804,7 @@ class V8_EXPORT HeapProfiler { * were not previously reported via AddObjectGroup. * * Thus, if an embedder wants to provide information about native - * objects for heap snapshots, he can do it in a GC prologue + * objects for heap snapshots, it can do it in a GC prologue * handler, and / or by assigning wrapper class ids in the following way: * * 1. Bind a callback to class id by calling SetWrapperClassInfoProvider. diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h index 73ec658f7b0abe..8133fdd49dcf4f 100644 --- a/deps/v8/include/v8-util.h +++ b/deps/v8/include/v8-util.h @@ -95,12 +95,12 @@ class DefaultPersistentValueMapTraits : public StdMapTraits { MapType* map, const K& key, Local value) { return NULL; } - static MapType* MapFromWeakCallbackData( - const WeakCallbackData& data) { + static MapType* MapFromWeakCallbackInfo( + const WeakCallbackInfo& data) { return NULL; } - static K KeyFromWeakCallbackData( - const WeakCallbackData& data) { + static K KeyFromWeakCallbackInfo( + const WeakCallbackInfo& data) { return K(); } static void DisposeCallbackData(WeakCallbackDataType* data) { } @@ -205,6 +205,17 @@ class PersistentValueMapBase { reinterpret_cast(FromVal(Traits::Get(&impl_, key)))); } + /** + * Call V8::RegisterExternallyReferencedObject with the map value for given + * key. + */ + void RegisterExternallyReferencedObject(K& key) { + DCHECK(Contains(key)); + V8::RegisterExternallyReferencedObject( + reinterpret_cast(FromVal(Traits::Get(&impl_, key))), + reinterpret_cast(GetIsolate())); + } + /** * Return value for key and remove it from the map. 
*/ @@ -402,11 +413,11 @@ class PersistentValueMap : public PersistentValueMapBase { private: static void WeakCallback( - const WeakCallbackData& data) { + const WeakCallbackInfo& data) { if (Traits::kCallbackType != kNotWeak) { PersistentValueMap* persistentValueMap = - Traits::MapFromWeakCallbackData(data); - K key = Traits::KeyFromWeakCallbackData(data); + Traits::MapFromWeakCallbackInfo(data); + K key = Traits::KeyFromWeakCallbackInfo(data); Traits::Dispose(data.GetIsolate(), persistentValueMap->Remove(key).Pass(), key); Traits::DisposeCallbackData(data.GetParameter()); diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 52125445b4ee12..e69271c00c5c62 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,12 +9,12 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 5 -#define V8_MINOR_VERSION 1 -#define V8_BUILD_NUMBER 281 -#define V8_PATCH_LEVEL 81 +#define V8_MINOR_VERSION 5 +#define V8_BUILD_NUMBER 0 +#define V8_PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) -#define V8_IS_CANDIDATE_VERSION 0 +#define V8_IS_CANDIDATE_VERSION 1 #endif // V8_INCLUDE_VERSION_H_ diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 8b7b7c2cc48c3b..edba8eaece01ae 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -50,7 +51,7 @@ #else // V8_OS_WIN // Setup for Linux shared library export. 
-#if V8_HAS_ATTRIBUTE_VISIBILITY && defined(V8_SHARED) +#if V8_HAS_ATTRIBUTE_VISIBILITY # ifdef BUILDING_V8_SHARED # define V8_EXPORT __attribute__ ((visibility("default"))) # else @@ -69,6 +70,7 @@ namespace v8 { class AccessorSignature; class Array; +class ArrayBuffer; class Boolean; class BooleanObject; class Context; @@ -94,6 +96,7 @@ class ObjectTemplate; class Platform; class Primitive; class Promise; +class PropertyDescriptor; class Proxy; class RawOperationDescriptor; class Script; @@ -292,8 +295,8 @@ class Local { return Local(T::Cast(*that)); } - - template V8_INLINE Local As() { + template + V8_INLINE Local As() const { return Local::Cast(*this); } @@ -340,7 +343,7 @@ class Local { #if !defined(V8_IMMINENT_DEPRECATION_WARNINGS) -// Local is an alias for Local for historical reasons. +// Handle is an alias for Local for historical reasons. template using Handle = Local; #endif @@ -457,32 +460,12 @@ class WeakCallbackInfo { }; -template -class WeakCallbackData { - public: - typedef void (*Callback)(const WeakCallbackData& data); - - WeakCallbackData(Isolate* isolate, P* parameter, Local handle) - : isolate_(isolate), parameter_(parameter), handle_(handle) {} - - V8_INLINE Isolate* GetIsolate() const { return isolate_; } - V8_INLINE P* GetParameter() const { return parameter_; } - V8_INLINE Local GetValue() const { return handle_; } - - private: - Isolate* isolate_; - P* parameter_; - Local handle_; -}; - - -// TODO(dcarney): delete this with WeakCallbackData -template -using PhantomCallbackData = WeakCallbackInfo; - - -enum class WeakCallbackType { kParameter, kInternalFields }; - +// kParameter will pass a void* parameter back to the callback, kInternalFields +// will pass the first two internal fields back to the callback, kFinalizer +// will pass a void* parameter back, but is invoked before the object is +// actually collected, so it can be resurrected. In the last case, it is not +// possible to request a second pass callback. 
+enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer }; /** * An object reference that is independent of any handle scope. Where @@ -561,36 +544,19 @@ template class PersistentBase { * critical form of resource management! */ template - V8_INLINE V8_DEPRECATED( - "use WeakCallbackInfo version", - void SetWeak(P* parameter, - typename WeakCallbackData::Callback callback)); - - template - V8_INLINE V8_DEPRECATED( - "use WeakCallbackInfo version", - void SetWeak(P* parameter, - typename WeakCallbackData::Callback callback)); - - // Phantom persistents work like weak persistents, except that the pointer to - // the object being collected is not available in the finalization callback. - // This enables the garbage collector to collect the object and any objects - // it references transitively in one GC cycle. At the moment you can either - // specify a parameter for the callback or the location of two internal - // fields in the dying object. - template - V8_INLINE V8_DEPRECATED( - "use SetWeak", - void SetPhantom(P* parameter, - typename WeakCallbackInfo

::Callback callback, - int internal_field_index1 = -1, - int internal_field_index2 = -1)); - - template V8_INLINE void SetWeak(P* parameter, typename WeakCallbackInfo

::Callback callback, WeakCallbackType type); + /** + * Turns this handle into a weak phantom handle without finalization callback. + * The handle will be reset automatically when the garbage collector detects + * that the object is no longer reachable. + * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall + * returns how many phantom handles were reset by the garbage collector. + */ + V8_INLINE void SetWeak(); + template V8_INLINE P* ClearWeak(); @@ -602,7 +568,7 @@ template class PersistentBase { * is alive. Only allowed when the embedder is asked to trace its heap by * EmbedderHeapTracer. */ - V8_INLINE void RegisterExternalReference(Isolate* isolate); + V8_INLINE void RegisterExternalReference(Isolate* isolate) const; /** * Marks the reference to this object independent. Garbage collector is free @@ -620,7 +586,9 @@ template class PersistentBase { * external dependencies. This mark is automatically cleared after each * garbage collection. */ - V8_INLINE void MarkPartiallyDependent(); + V8_INLINE V8_DEPRECATED( + "deprecated optimization, do not use partially dependent groups", + void MarkPartiallyDependent()); /** * Marks the reference to this object as active. 
The scavenge garbage @@ -649,6 +617,9 @@ template class PersistentBase { */ V8_INLINE uint16_t WrapperClassId() const; + PersistentBase(const PersistentBase& other) = delete; // NOLINT + void operator=(const PersistentBase&) = delete; + private: friend class Isolate; friend class Utils; @@ -664,8 +635,6 @@ template class PersistentBase { friend class Object; explicit V8_INLINE PersistentBase(T* val) : val_(val) {} - PersistentBase(const PersistentBase& other) = delete; // NOLINT - void operator=(const PersistentBase&) = delete; V8_INLINE static T* New(Isolate* isolate, T* that); T* val_; @@ -778,17 +747,18 @@ template class Persistent : public PersistentBase { // TODO(dcarney): this is pretty useless, fix or remove template - V8_INLINE static Persistent& Cast(Persistent& that) { // NOLINT + V8_INLINE static Persistent& Cast(const Persistent& that) { // NOLINT #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. 
if (!that.IsEmpty()) T::Cast(*that); #endif - return reinterpret_cast&>(that); + return reinterpret_cast&>(const_cast&>(that)); } // TODO(dcarney): this is pretty useless, fix or remove - template V8_INLINE Persistent& As() { // NOLINT + template + V8_INLINE Persistent& As() const { // NOLINT return Persistent::Cast(*this); } @@ -868,11 +838,12 @@ class Global : public PersistentBase { */ typedef void MoveOnlyTypeForCPP03; + Global(const Global&) = delete; + void operator=(const Global&) = delete; + private: template friend class ReturnValue; - Global(const Global&) = delete; - void operator=(const Global&) = delete; V8_INLINE T* operator*() const { return this->val_; } }; @@ -911,6 +882,11 @@ class V8_EXPORT HandleScope { return reinterpret_cast(isolate_); } + HandleScope(const HandleScope&) = delete; + void operator=(const HandleScope&) = delete; + void* operator new(size_t size) = delete; + void operator delete(void*, size_t) = delete; + protected: V8_INLINE HandleScope() {} @@ -924,13 +900,6 @@ class V8_EXPORT HandleScope { static internal::Object** CreateHandle(internal::HeapObject* heap_object, internal::Object* value); - // Make it hard to create heap-allocated or illegal handle scopes by - // disallowing certain operations. 
- HandleScope(const HandleScope&); - void operator=(const HandleScope&); - void* operator new(size_t size); - void operator delete(void*, size_t); - internal::Isolate* isolate_; internal::Object** prev_next_; internal::Object** prev_limit_; @@ -965,16 +934,13 @@ class V8_EXPORT EscapableHandleScope : public HandleScope { return Local(reinterpret_cast(slot)); } + EscapableHandleScope(const EscapableHandleScope&) = delete; + void operator=(const EscapableHandleScope&) = delete; + void* operator new(size_t size) = delete; + void operator delete(void*, size_t) = delete; + private: internal::Object** Escape(internal::Object** escape_value); - - // Make it hard to create heap-allocated or illegal handle scopes by - // disallowing certain operations. - EscapableHandleScope(const EscapableHandleScope&); - void operator=(const EscapableHandleScope&); - void* operator new(size_t size); - void operator delete(void*, size_t); - internal::Object** escape_slot_; }; @@ -983,15 +949,13 @@ class V8_EXPORT SealHandleScope { SealHandleScope(Isolate* isolate); ~SealHandleScope(); - private: - // Make it hard to create heap-allocated or illegal handle scopes by - // disallowing certain operations. - SealHandleScope(const SealHandleScope&); - void operator=(const SealHandleScope&); - void* operator new(size_t size); - void operator delete(void*, size_t); + SealHandleScope(const SealHandleScope&) = delete; + void operator=(const SealHandleScope&) = delete; + void* operator new(size_t size) = delete; + void operator delete(void*, size_t) = delete; - internal::Isolate* isolate_; + private: + internal::Isolate* const isolate_; internal::Object** prev_limit_; int prev_sealed_level_; }; @@ -1106,6 +1070,47 @@ class V8_EXPORT UnboundScript { static const int kNoScriptId = 0; }; +/** + * This is an unfinished experimental feature, and is only exposed + * here for internal testing purposes. DO NOT USE. + * + * A compiled JavaScript module. 
+ */ +class V8_EXPORT Module { + public: + /** + * Returns the number of modules requested by this module. + */ + int GetModuleRequestsLength() const; + + /** + * Returns the ith module specifier in this module. + * i must be < GetModuleRequestsLength() and >= 0. + */ + Local GetModuleRequest(int i) const; + + void SetEmbedderData(Local data); + Local GetEmbedderData() const; + + typedef MaybeLocal (*ResolveCallback)(Local context, + Local specifier, + Local referrer, + Local data); + + /** + * ModuleDeclarationInstantiation + * + * Returns false if an exception occurred during instantiation. + */ + V8_WARN_UNUSED_RESULT bool Instantiate( + Local context, ResolveCallback callback, + Local callback_data = Local()); + + /** + * ModuleEvaluation + */ + V8_WARN_UNUSED_RESULT MaybeLocal Evaluate(Local context); +}; /** * A compiled JavaScript script, tied to a Context which was active when the @@ -1181,10 +1186,9 @@ class V8_EXPORT ScriptCompiler { bool rejected; BufferPolicy buffer_policy; - private: - // Prevent copying. Not implemented. - CachedData(const CachedData&); - CachedData& operator=(const CachedData&); + // Prevent copying. + CachedData(const CachedData&) = delete; + CachedData& operator=(const CachedData&) = delete; }; /** @@ -1204,11 +1208,12 @@ class V8_EXPORT ScriptCompiler { // alive. V8_INLINE const CachedData* GetCachedData() const; + // Prevent copying. + Source(const Source&) = delete; + Source& operator=(const Source&) = delete; + private: friend class ScriptCompiler; - // Prevent copying. Not implemented. - Source(const Source&); - Source& operator=(const Source&); Local source_string; @@ -1291,11 +1296,11 @@ class V8_EXPORT ScriptCompiler { internal::StreamedSource* impl() const { return impl_; } - private: - // Prevent copying. Not implemented. - StreamedSource(const StreamedSource&); - StreamedSource& operator=(const StreamedSource&); + // Prevent copying. 
+ StreamedSource(const StreamedSource&) = delete; + StreamedSource& operator=(const StreamedSource&) = delete; + private: internal::StreamedSource* impl_; }; @@ -1409,18 +1414,17 @@ class V8_EXPORT ScriptCompiler { static uint32_t CachedDataVersionTag(); /** - * Compile an ES6 module. - * * This is an unfinished experimental feature, and is only exposed - * here for internal testing purposes. - * Only parsing works at the moment. Do not use. + * here for internal testing purposes. DO NOT USE. + * + * Compile an ES module, returning a Module that encapsulates + * the compiled code. * - * TODO(adamk): Script is likely the wrong return value for this; - * should return some new Module type. + * Corresponds to the ParseModule abstract operation in the + * ECMAScript specification. */ - static V8_WARN_UNUSED_RESULT MaybeLocal + + + + + + + +

Runtime Stats Komparator

+ +
+
+

Data

+
+

+ +

+
+
+ + + +
+

Version Selector

+ +
+ +
+

Page Selector

+ +
+ +
+
+ + + + + + +
+
+ + +
+ + + + + + + + + + + +
Pos. Name Time Percent Count 
+
+
+ +
+

Usage

+
    +
  1. Install scipy, e.g. sudo aptitude install python-scipy +
  2. Build chrome.
  3. +
  4. Check out a known working version of webpagereply: +
    git -C $CHROME_DIR/third_party/webpagereplay checkout 7dbd94752d1cde5536ffc623a9e10a51721eff1d
    +
  5. +
  6. Run callstats.py with a web-page-replay archive: +
    $V8_DIR/tools/callstats.py run \
    +        --replay-bin=$CHROME_SRC/third_party/webpagereplay/replay.py \
    +        --replay-wpr=$INPUT_DIR/top25.wpr \
    +        --js-flags="" \
    +        --with-chrome=$CHROME_SRC/out/Release/chrome \
    +        --sites-file=$INPUT_DIR/top25.json
    +
  7. +
  8. Move results file to a subdirectory: mkdir $VERSION_DIR; mv *.txt $VERSION_DIR
  9. +
  10. Repeat from step 1 with a different configuration (e.g. --js-flags="--nolazy").
  11. +
  12. Create the final results file: ./callstats.py json $VERSION_DIR1 $VERSION_DIR2 > result.json
  13. +
  14. Use results.json on this site. +
+
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Page:
Version:
Time:± ±
Percent:± ±
Percent per Entry:
Count:± ±
Overall Impact:± ±
+
+ + diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py new file mode 100755 index 00000000000000..2ae766b7aed8d2 --- /dev/null +++ b/deps/v8/tools/callstats.py @@ -0,0 +1,646 @@ +#!/usr/bin/env python +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +''' +Usage: callstats.py [-h] ... + +Optional arguments: + -h, --help show this help message and exit + +Commands: + run run chrome with --runtime-call-stats and generate logs + stats process logs and print statistics + json process logs from several versions and generate JSON + help help information + +For each command, you can try ./runtime-call-stats.py help command. +''' + +import argparse +import json +import os +import re +import shutil +import subprocess +import sys +import tempfile +import operator + +import numpy +import scipy +import scipy.stats +from math import sqrt + + +# Run benchmarks. + +def print_command(cmd_args): + def fix_for_printing(arg): + m = re.match(r'^--([^=]+)=(.*)$', arg) + if m and (' ' in m.group(2) or m.group(2).startswith('-')): + arg = "--{}='{}'".format(m.group(1), m.group(2)) + elif ' ' in arg: + arg = "'{}'".format(arg) + return arg + print " ".join(map(fix_for_printing, cmd_args)) + + +def start_replay_server(args, sites): + with tempfile.NamedTemporaryFile(prefix='callstats-inject-', suffix='.js', + mode='wt', delete=False) as f: + injection = f.name + generate_injection(f, sites, args.refresh) + http_port = 4080 + args.port_offset + https_port = 4443 + args.port_offset + cmd_args = [ + args.replay_bin, + "--port=%s" % http_port, + "--ssl_port=%s" % https_port, + "--no-dns_forwarding", + "--use_closest_match", + "--no-diff_unknown_requests", + "--inject_scripts=deterministic.js,{}".format(injection), + args.replay_wpr, + ] + print "=" * 80 + print_command(cmd_args) + with open(os.devnull, 'w') as null: + server = subprocess.Popen(cmd_args, 
stdout=null, stderr=null) + print "RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid) + print "=" * 80 + return {'process': server, 'injection': injection} + + +def stop_replay_server(server): + print("SHUTTING DOWN REPLAY SERVER %s" % server['process'].pid) + server['process'].terminate() + os.remove(server['injection']) + + +def generate_injection(f, sites, refreshes=0): + print >> f, """\ +(function() { + var s = window.sessionStorage.getItem("refreshCounter"); + var refreshTotal = """, refreshes, """; + var refreshCounter = s ? parseInt(s) : refreshTotal; + var refreshId = refreshTotal - refreshCounter; + if (refreshCounter > 0) { + window.sessionStorage.setItem("refreshCounter", refreshCounter-1); + } + function match(url, item) { + if ('regexp' in item) { return url.match(item.regexp) !== null }; + var url_wanted = item.url; + /* Allow automatic redirections from http to https. */ + if (url_wanted.startsWith("http://") && url.startsWith("https://")) { + url_wanted = "https://" + url_wanted.substr(7); + } + return url.startsWith(url_wanted); + }; + function onLoad(url) { + for (var item of sites) { + if (!match(url, item)) continue; + var timeout = 'timeline' in item ? 2000 * item.timeline + : 'timeout' in item ? 
1000 * (item.timeout - 3) + : 10000; + console.log("Setting time out of " + timeout + " for: " + url); + window.setTimeout(function() { + console.log("Time is out for: " + url); + var msg = "STATS: (" + refreshId + ") " + url; + %GetAndResetRuntimeCallStats(1, msg); + if (refreshCounter > 0) { + console.log( + "Refresh counter is " + refreshCounter + ", refreshing: " + url); + window.location.reload(); + } + }, timeout); + return; + } + console.log("Ignoring: " + url); + }; + var sites = + """, json.dumps(sites), """; + onLoad(window.location.href); +})();""" + + +def run_site(site, domain, args, timeout=None): + print "="*80 + print "RUNNING DOMAIN %s" % domain + print "="*80 + result_template = "{domain}#{count}.txt" if args.repeat else "{domain}.txt" + count = 0 + if timeout is None: timeout = args.timeout + if args.replay_wpr: + timeout *= 1 + args.refresh + timeout += 1 + retries_since_good_run = 0 + while count == 0 or args.repeat is not None and count < args.repeat: + count += 1 + result = result_template.format(domain=domain, count=count) + retries = 0 + while args.retries is None or retries < args.retries: + retries += 1 + try: + if args.user_data_dir: + user_data_dir = args.user_data_dir + else: + user_data_dir = tempfile.mkdtemp(prefix="chr_") + js_flags = "--runtime-call-stats" + if args.replay_wpr: js_flags += " --allow-natives-syntax" + if args.js_flags: js_flags += " " + args.js_flags + chrome_flags = [ + "--no-default-browser-check", + "--no-sandbox", + "--disable-translate", + "--enable-benchmarking", + "--js-flags={}".format(js_flags), + "--no-first-run", + "--user-data-dir={}".format(user_data_dir), + ] + if args.replay_wpr: + http_port = 4080 + args.port_offset + https_port = 4443 + args.port_offset + chrome_flags += [ + "--host-resolver-rules=MAP *:80 localhost:%s, " \ + "MAP *:443 localhost:%s, " \ + "EXCLUDE localhost" % ( + http_port, https_port), + "--ignore-certificate-errors", + "--disable-seccomp-sandbox", + "--disable-web-security", + 
"--reduce-security-for-testing", + "--allow-insecure-localhost", + ] + else: + chrome_flags += [ + "--single-process", + ] + if args.chrome_flags: + chrome_flags += args.chrome_flags.split() + cmd_args = [ + "timeout", str(timeout), + args.with_chrome + ] + chrome_flags + [ site ] + print "- " * 40 + print_command(cmd_args) + print "- " * 40 + with open(result, "wt") as f: + with open(args.log_stderr or os.devnull, 'at') as err: + status = subprocess.call(cmd_args, stdout=f, stderr=err) + # 124 means timeout killed chrome, 0 means the user was bored first! + # If none of these two happened, then chrome apparently crashed, so + # it must be called again. + if status != 124 and status != 0: + print("CHROME CRASHED, REPEATING RUN"); + continue + # If the stats file is empty, chrome must be called again. + if os.path.isfile(result) and os.path.getsize(result) > 0: + if args.print_url: + with open(result, "at") as f: + print >> f + print >> f, "URL: {}".format(site) + retries_since_good_run = 0 + break + if retries_since_good_run < 6: + timeout += 2 ** retries_since_good_run + retries_since_good_run += 1 + print("EMPTY RESULT, REPEATING RUN ({})".format( + retries_since_good_run)); + finally: + if not args.user_data_dir: + shutil.rmtree(user_data_dir) + + +def read_sites_file(args): + try: + sites = [] + try: + with open(args.sites_file, "rt") as f: + for item in json.load(f): + if 'timeout' not in item: + # This is more-or-less arbitrary. + item['timeout'] = int(1.5 * item['timeline'] + 7) + if item['timeout'] > args.timeout: item['timeout'] = args.timeout + sites.append(item) + except ValueError: + with open(args.sites_file, "rt") as f: + for line in f: + line = line.strip() + if not line or line.startswith('#'): continue + sites.append({'url': line, 'timeout': args.timeout}) + return sites + except IOError as e: + args.error("Cannot read from {}. {}.".format(args.sites_file, e.strerror)) + sys.exit(1) + + +def do_run(args): + # Determine the websites to benchmark. 
+ if args.sites_file: + sites = read_sites_file(args) + else: + sites = [{'url': site, 'timeout': args.timeout} for site in args.sites] + # Disambiguate domains, if needed. + L = [] + domains = {} + for item in sites: + site = item['url'] + domain = None + if args.domain: + domain = args.domain + elif 'domain' in item: + domain = item['domain'] + else: + m = re.match(r'^(https?://)?([^/]+)(/.*)?$', site) + if not m: + args.error("Invalid URL {}.".format(site)) + continue + domain = m.group(2) + entry = [site, domain, None, item['timeout']] + if domain not in domains: + domains[domain] = entry + else: + if not isinstance(domains[domain], int): + domains[domain][2] = 1 + domains[domain] = 1 + domains[domain] += 1 + entry[2] = domains[domain] + L.append(entry) + replay_server = start_replay_server(args, sites) if args.replay_wpr else None + try: + # Run them. + for site, domain, count, timeout in L: + if count is not None: domain = "{}%{}".format(domain, count) + print site, domain, timeout + run_site(site, domain, args, timeout) + finally: + if replay_server: + stop_replay_server(replay_server) + + +# Calculate statistics. + +def statistics(data): + N = len(data) + average = numpy.average(data) + median = numpy.median(data) + low = numpy.min(data) + high= numpy.max(data) + if N > 1: + # evaluate sample variance by setting delta degrees of freedom (ddof) to + # 1. 
The degree used in calculations is N - ddof + stddev = numpy.std(data, ddof=1) + # Get the endpoints of the range that contains 95% of the distribution + t_bounds = scipy.stats.t.interval(0.95, N-1) + #assert abs(t_bounds[0] + t_bounds[1]) < 1e-6 + # sum mean to the confidence interval + ci = { + 'abs': t_bounds[1] * stddev / sqrt(N), + 'low': average + t_bounds[0] * stddev / sqrt(N), + 'high': average + t_bounds[1] * stddev / sqrt(N) + } + else: + stddev = 0 + ci = { 'abs': 0, 'low': average, 'high': average } + if abs(stddev) > 0.0001 and abs(average) > 0.0001: + ci['perc'] = t_bounds[1] * stddev / sqrt(N) / average * 100 + else: + ci['perc'] = 0 + return { 'samples': N, 'average': average, 'median': median, + 'stddev': stddev, 'min': low, 'max': high, 'ci': ci } + + +def read_stats(path, domain, args): + groups = []; + if args.aggregate: + groups = [ + ('Group-IC', re.compile(".*IC.*")), + ('Group-Optimize', + re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")), + ('Group-Compile', re.compile(".*Compile.*")), + ('Group-Parse', re.compile(".*Parse.*")), + ('Group-Callback', re.compile(".*Callback.*")), + ('Group-API', re.compile(".*API.*")), + ('Group-GC', re.compile("GC|AllocateInTargetSpace")), + ('Group-JavaScript', re.compile("JS_Execution")), + ('Group-Runtime', re.compile(".*"))] + with open(path, "rt") as f: + # Process the whole file and sum repeating entries. + entries = { 'Sum': {'time': 0, 'count': 0} } + for group_name, regexp in groups: + entries[group_name] = { 'time': 0, 'count': 0 } + for line in f: + line = line.strip() + # Discard headers and footers. + if not line: continue + if line.startswith("Runtime Function"): continue + if line.startswith("===="): continue + if line.startswith("----"): continue + if line.startswith("URL:"): continue + if line.startswith("STATS:"): continue + # We have a regular line. 
+ fields = line.split() + key = fields[0] + time = float(fields[1].replace("ms", "")) + count = int(fields[3]) + if key not in entries: entries[key] = { 'time': 0, 'count': 0 } + entries[key]['time'] += time + entries[key]['count'] += count + # We calculate the sum, if it's not the "total" line. + if key != "Total": + entries['Sum']['time'] += time + entries['Sum']['count'] += count + for group_name, regexp in groups: + if not regexp.match(key): continue + entries[group_name]['time'] += time + entries[group_name]['count'] += count + break + # Calculate the V8-Total (all groups except Callback) + total_v8 = { 'time': 0, 'count': 0 } + for group_name, regexp in groups: + if group_name == 'Group-Callback': continue + total_v8['time'] += entries[group_name]['time'] + total_v8['count'] += entries[group_name]['count'] + entries['Group-Total-V8'] = total_v8 + # Append the sums as single entries to domain. + for key in entries: + if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] } + domain[key]['time_list'].append(entries[key]['time']) + domain[key]['count_list'].append(entries[key]['count']) + + +def print_stats(S, args): + # Sort by ascending/descending time average, then by ascending/descending + # count average, then by ascending name. + def sort_asc_func(item): + return (item[1]['time_stat']['average'], + item[1]['count_stat']['average'], + item[0]) + def sort_desc_func(item): + return (-item[1]['time_stat']['average'], + -item[1]['count_stat']['average'], + item[0]) + # Sorting order is in the commend-line arguments. + sort_func = sort_asc_func if args.sort == "asc" else sort_desc_func + # Possibly limit how many elements to print. + L = [item for item in sorted(S.items(), key=sort_func) + if item[0] not in ["Total", "Sum"]] + N = len(L) + if args.limit == 0: + low, high = 0, N + elif args.sort == "desc": + low, high = 0, args.limit + else: + low, high = N-args.limit, N + # How to print entries. 
+ def print_entry(key, value): + def stats(s, units=""): + conf = "{:0.1f}({:0.2f}%)".format(s['ci']['abs'], s['ci']['perc']) + return "{:8.1f}{} +/- {:15s}".format(s['average'], units, conf) + print "{:>50s} {} {}".format( + key, + stats(value['time_stat'], units="ms"), + stats(value['count_stat']) + ) + # Print and calculate partial sums, if necessary. + for i in range(low, high): + print_entry(*L[i]) + if args.totals and args.limit != 0 and not args.aggregate: + if i == low: + partial = { 'time_list': [0] * len(L[i][1]['time_list']), + 'count_list': [0] * len(L[i][1]['count_list']) } + assert len(partial['time_list']) == len(L[i][1]['time_list']) + assert len(partial['count_list']) == len(L[i][1]['count_list']) + for j, v in enumerate(L[i][1]['time_list']): + partial['time_list'][j] += v + for j, v in enumerate(L[i][1]['count_list']): + partial['count_list'][j] += v + # Print totals, if necessary. + if args.totals: + print '-' * 80 + if args.limit != 0 and not args.aggregate: + partial['time_stat'] = statistics(partial['time_list']) + partial['count_stat'] = statistics(partial['count_list']) + print_entry("Partial", partial) + print_entry("Sum", S["Sum"]) + print_entry("Total", S["Total"]) + + +def do_stats(args): + domains = {} + for path in args.logfiles: + filename = os.path.basename(path) + m = re.match(r'^([^#]+)(#.*)?$', filename) + domain = m.group(1) + if domain not in domains: domains[domain] = {} + read_stats(path, domains[domain], args) + if args.aggregate: + create_total_page_stats(domains, args) + for i, domain in enumerate(sorted(domains)): + if len(domains) > 1: + if i > 0: print + print "{}:".format(domain) + print '=' * 80 + domain_stats = domains[domain] + for key in domain_stats: + domain_stats[key]['time_stat'] = \ + statistics(domain_stats[key]['time_list']) + domain_stats[key]['count_stat'] = \ + statistics(domain_stats[key]['count_list']) + print_stats(domain_stats, args) + + +# Create a Total page with all entries summed up. 
+def create_total_page_stats(domains, args): + total = {} + def sum_up(parent, key, other): + sums = parent[key] + for i, item in enumerate(other[key]): + if i >= len(sums): + sums.extend([0] * (i - len(sums) + 1)) + if item is not None: + sums[i] += item + # Sum up all the entries/metrics from all domains + for domain, entries in domains.items(): + for key, domain_stats in entries.items(): + if key not in total: + total[key] = {} + total[key]['time_list'] = list(domain_stats['time_list']) + total[key]['count_list'] = list(domain_stats['count_list']) + else: + sum_up(total[key], 'time_list', domain_stats) + sum_up(total[key], 'count_list', domain_stats) + # Add a new "Total" page containing the summed up metrics. + domains['Total'] = total + + +# Generate JSON file. + +def do_json(args): + versions = {} + for path in args.logdirs: + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + version = os.path.basename(root) + if version not in versions: versions[version] = {} + for filename in files: + if filename.endswith(".txt"): + m = re.match(r'^([^#]+)(#.*)?\.txt$', filename) + domain = m.group(1) + if domain not in versions[version]: versions[version][domain] = {} + read_stats(os.path.join(root, filename), + versions[version][domain], args) + for version, domains in versions.items(): + if args.aggregate: + create_total_page_stats(domains, args) + for domain, entries in domains.items(): + stats = [] + for name, value in entries.items(): + # We don't want the calculated sum in the JSON file. + if name == "Sum": continue + entry = [name] + for x in ['time_list', 'count_list']: + s = statistics(entries[name][x]) + entry.append(round(s['average'], 1)) + entry.append(round(s['ci']['abs'], 1)) + entry.append(round(s['ci']['perc'], 2)) + stats.append(entry) + domains[domain] = stats + print json.dumps(versions, separators=(',', ':')) + + +# Help. 
+ +def do_help(parser, subparsers, args): + if args.help_cmd: + if args.help_cmd in subparsers: + subparsers[args.help_cmd].print_help() + else: + args.error("Unknown command '{}'".format(args.help_cmd)) + else: + parser.print_help() + + +# Main program, parse command line and execute. + +def coexist(*l): + given = sum(1 for x in l if x) + return given == 0 or given == len(l) + +def main(): + parser = argparse.ArgumentParser() + subparser_adder = parser.add_subparsers(title="commands", dest="command", + metavar="") + subparsers = {} + # Command: run. + subparsers["run"] = subparser_adder.add_parser( + "run", help="run --help") + subparsers["run"].set_defaults( + func=do_run, error=subparsers["run"].error) + subparsers["run"].add_argument( + "--chrome-flags", type=str, default="", + help="specify additional chrome flags") + subparsers["run"].add_argument( + "--js-flags", type=str, default="", + help="specify additional V8 flags") + subparsers["run"].add_argument( + "--domain", type=str, default="", + help="specify the output file domain name") + subparsers["run"].add_argument( + "--no-url", dest="print_url", action="store_false", default=True, + help="do not include url in statistics file") + subparsers["run"].add_argument( + "-n", "--repeat", type=int, metavar="", + help="specify iterations for each website (default: once)") + subparsers["run"].add_argument( + "-k", "--refresh", type=int, metavar="", default=0, + help="specify refreshes for each iteration (default: 0)") + subparsers["run"].add_argument( + "--replay-wpr", type=str, metavar="", + help="use the specified web page replay (.wpr) archive") + subparsers["run"].add_argument( + "--replay-bin", type=str, metavar="", + help="specify the replay.py script typically located in " \ + "$CHROMIUM/src/third_party/webpagereplay/replay.py") + subparsers["run"].add_argument( + "-r", "--retries", type=int, metavar="", + help="specify retries if website is down (default: forever)") + subparsers["run"].add_argument( + 
"-f", "--sites-file", type=str, metavar="", + help="specify file containing benchmark websites") + subparsers["run"].add_argument( + "-t", "--timeout", type=int, metavar="", default=60, + help="specify seconds before chrome is killed") + subparsers["run"].add_argument( + "-p", "--port-offset", type=int, metavar="", default=0, + help="specify the offset for the replay server's default ports") + subparsers["run"].add_argument( + "-u", "--user-data-dir", type=str, metavar="", + help="specify user data dir (default is temporary)") + subparsers["run"].add_argument( + "-c", "--with-chrome", type=str, metavar="", + default="/usr/bin/google-chrome", + help="specify chrome executable to use") + subparsers["run"].add_argument( + "-l", "--log-stderr", type=str, metavar="", + help="specify where chrome's stderr should go (default: /dev/null)") + subparsers["run"].add_argument( + "sites", type=str, metavar="", nargs="*", + help="specify benchmark website") + # Command: stats. + subparsers["stats"] = subparser_adder.add_parser( + "stats", help="stats --help") + subparsers["stats"].set_defaults( + func=do_stats, error=subparsers["stats"].error) + subparsers["stats"].add_argument( + "-l", "--limit", type=int, metavar="", default=0, + help="limit how many items to print (default: none)") + subparsers["stats"].add_argument( + "-s", "--sort", choices=["asc", "desc"], default="asc", + help="specify sorting order (default: ascending)") + subparsers["stats"].add_argument( + "-n", "--no-total", dest="totals", action="store_false", default=True, + help="do not print totals") + subparsers["stats"].add_argument( + "logfiles", type=str, metavar="", nargs="*", + help="specify log files to parse") + subparsers["stats"].add_argument( + "--aggregate", dest="aggregate", action="store_true", default=False, + help="Create aggregated entries. Adds Group-* entries at the toplevel. " + + "Additionally creates a Total page with all entries.") + # Command: json. 
+ subparsers["json"] = subparser_adder.add_parser( + "json", help="json --help") + subparsers["json"].set_defaults( + func=do_json, error=subparsers["json"].error) + subparsers["json"].add_argument( + "logdirs", type=str, metavar="", nargs="*", + help="specify directories with log files to parse") + subparsers["json"].add_argument( + "--aggregate", dest="aggregate", action="store_true", default=False, + help="Create aggregated entries. Adds Group-* entries at the toplevel. " + + "Additionally creates a Total page with all entries.") + # Command: help. + subparsers["help"] = subparser_adder.add_parser( + "help", help="help information") + subparsers["help"].set_defaults( + func=lambda args: do_help(parser, subparsers, args), + error=subparsers["help"].error) + subparsers["help"].add_argument( + "help_cmd", type=str, metavar="", nargs="?", + help="command for which to display help") + # Execute the command. + args = parser.parse_args() + setattr(args, 'script_path', os.path.dirname(sys.argv[0])) + if args.command == "run" and coexist(args.sites_file, args.sites): + args.error("use either option --sites-file or site URLs") + sys.exit(1) + elif args.command == "run" and not coexist(args.replay_wpr, args.replay_bin): + args.error("options --replay-wpr and --replay-bin must be used together") + sys.exit(1) + else: + args.func(args) + +if __name__ == "__main__": + sys.exit(main()) diff --git a/deps/v8/tools/check-static-initializers.gyp b/deps/v8/tools/check-static-initializers.gyp index 547a6c873bb873..cfeacfc89fb08e 100644 --- a/deps/v8/tools/check-static-initializers.gyp +++ b/deps/v8/tools/check-static-initializers.gyp @@ -13,8 +13,8 @@ '../src/d8.gyp:d8_run', ], 'includes': [ - '../build/features.gypi', - '../build/isolate.gypi', + '../gypfiles/features.gypi', + '../gypfiles/isolate.gypi', ], 'sources': [ 'check-static-initializers.isolate', diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js index fa6c36b50b9a1a..30cdc21db50160 100644 --- 
a/deps/v8/tools/codemap.js +++ b/deps/v8/tools/codemap.js @@ -245,6 +245,14 @@ CodeMap.prototype.getAllStaticEntries = function() { }; +/** + * Returns an array of pairs of all static code entries and their addresses. + */ +CodeMap.prototype.getAllStaticEntriesWithAddresses = function() { + return this.statics_.exportKeysAndValues(); +}; + + /** * Returns an array of all libraries entries. */ diff --git a/deps/v8/tools/detect-builtins.js b/deps/v8/tools/detect-builtins.js index 2a476baa4b031c..90bdc08860f2e2 100644 --- a/deps/v8/tools/detect-builtins.js +++ b/deps/v8/tools/detect-builtins.js @@ -24,6 +24,8 @@ } // Avoid endless recursion. if (this_name === "prototype" && name === "constructor") continue; + // Avoid needless duplication. + if (this_name === "__PROTO__" && name === "constructor") continue; // Could get this from the parent, but having it locally is easier. var property = { "name": name }; try { @@ -39,9 +41,18 @@ property.length = value.length; property.prototype = GetProperties("prototype", value.prototype); } - property.properties = GetProperties(name, value); + if (type === "string" || type === "number") { + property.value = value; + } else { + property.properties = GetProperties(name, value); + } result[name] = property; } + // Print the __proto__ if it's not the default Object prototype. + if (typeof object === "object" && object.__proto__ !== null && + !object.__proto__.hasOwnProperty("__proto__")) { + result.__PROTO__ = GetProperties("__PROTO__", object.__proto__); + } return result; }; diff --git a/deps/v8/tools/dev/v8gen.py b/deps/v8/tools/dev/v8gen.py new file mode 100755 index 00000000000000..f0fb74b7094608 --- /dev/null +++ b/deps/v8/tools/dev/v8gen.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Script to generate V8's gn arguments based on common developer defaults +or builder configurations. + +Goma is used by default if detected. The compiler proxy is assumed to run. + +This script can be added to the PATH and be used on other checkouts. It always +runs for the checkout nesting the CWD. + +Configurations of this script live in infra/mb/mb_config.pyl. + +Available actions are: {gen,list}. Omitting the action defaults to "gen". + +------------------------------------------------------------------------------- + +Examples: + +# Generate the ia32.release config in out.gn/ia32.release. +v8gen.py ia32.release + +# Generate into out.gn/foo without goma auto-detect. +v8gen.py gen -b ia32.release foo --no-goma + +# Pass additional gn arguments after -- (don't use spaces within gn args). +v8gen.py ia32.optdebug -- v8_enable_slow_dchecks=true + +# Generate gn arguments of 'V8 Linux64 - builder' from 'client.v8'. To switch +# off goma usage here, the args.gn file must be edited manually. +v8gen.py -m client.v8 -b 'V8 Linux64 - builder' + +# Show available configurations. +v8gen.py list + +------------------------------------------------------------------------------- +""" + +import argparse +import os +import re +import subprocess +import sys + +CONFIG = os.path.join('infra', 'mb', 'mb_config.pyl') +GOMA_DEFAULT = os.path.join(os.path.expanduser("~"), 'goma') +OUT_DIR = 'out.gn' + +TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.append(os.path.join(TOOLS_PATH, 'mb')) + +import mb + + +def _sanitize_nonalpha(text): + return re.sub(r'[^a-zA-Z0-9.]', '_', text) + + +class GenerateGnArgs(object): + def __init__(self, args): + # Split args into this script's arguments and gn args passed to the + # wrapped gn. 
+ index = args.index('--') if '--' in args else len(args) + self._options = self._parse_arguments(args[:index]) + self._gn_args = args[index + 1:] + + def _parse_arguments(self, args): + self.parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawTextHelpFormatter, + ) + + def add_common_options(p): + p.add_argument( + '-m', '--master', default='developer_default', + help='config group or master from mb_config.pyl - default: ' + 'developer_default') + p.add_argument( + '-v', '--verbosity', action='count', + help='print wrapped commands (use -vv to print output of wrapped ' + 'commands)') + + subps = self.parser.add_subparsers() + + # Command: gen. + gen_cmd = subps.add_parser( + 'gen', help='generate a new set of build files (default)') + gen_cmd.set_defaults(func=self.cmd_gen) + add_common_options(gen_cmd) + gen_cmd.add_argument( + 'outdir', nargs='?', + help='optional gn output directory') + gen_cmd.add_argument( + '-b', '--builder', + help='build configuration or builder name from mb_config.pyl, e.g. ' + 'x64.release') + gen_cmd.add_argument( + '-p', '--pedantic', action='store_true', + help='run gn over command-line gn args to catch errors early') + + goma = gen_cmd.add_mutually_exclusive_group() + goma.add_argument( + '-g' , '--goma', + action='store_true', default=None, dest='goma', + help='force using goma') + goma.add_argument( + '--nogoma', '--no-goma', + action='store_false', default=None, dest='goma', + help='don\'t use goma auto detection - goma might still be used if ' + 'specified as a gn arg') + + # Command: list. + list_cmd = subps.add_parser( + 'list', help='list available configurations') + list_cmd.set_defaults(func=self.cmd_list) + add_common_options(list_cmd) + + # Default to "gen" unless global help is requested. 
+ if not args or args[0] not in subps.choices.keys() + ['-h', '--help']: + args = ['gen'] + args + + return self.parser.parse_args(args) + + def cmd_gen(self): + if not self._options.outdir and not self._options.builder: + self.parser.error('please specify either an output directory or ' + 'a builder/config name (-b), e.g. x64.release') + + if not self._options.outdir: + # Derive output directory from builder name. + self._options.outdir = _sanitize_nonalpha(self._options.builder) + else: + # Also, if this should work on windows, we might need to use \ where + # outdir is used as path, while using / if it's used in a gn context. + if self._options.outdir.startswith('/'): + self.parser.error( + 'only output directories relative to %s are supported' % OUT_DIR) + + if not self._options.builder: + # Derive builder from output directory. + self._options.builder = self._options.outdir + + # Check for builder/config in mb config. + if self._options.builder not in self._mbw.masters[self._options.master]: + print '%s does not exist in %s for %s' % ( + self._options.builder, CONFIG, self._options.master) + return 1 + + # TODO(machenbach): Check if the requested configurations has switched to + # gn at all. + + # The directories are separated with slashes in a gn context (platform + # independent). + gn_outdir = '/'.join([OUT_DIR, self._options.outdir]) + + # Call MB to generate the basic configuration. + self._call_cmd([ + sys.executable, + '-u', os.path.join('tools', 'mb', 'mb.py'), + 'gen', + '-f', CONFIG, + '-m', self._options.master, + '-b', self._options.builder, + gn_outdir, + ]) + + # Handle extra gn arguments. + gn_args_path = os.path.join(OUT_DIR, self._options.outdir, 'args.gn') + + # Append command-line args. + modified = self._append_gn_args( + 'command-line', gn_args_path, '\n'.join(self._gn_args)) + + # Append goma args. + # TODO(machenbach): We currently can't remove existing goma args from the + # original config. E.g. 
to build like a bot that uses goma, but switch + # goma off. + modified |= self._append_gn_args( + 'goma', gn_args_path, self._goma_args) + + # Regenerate ninja files to check for errors in the additional gn args. + if modified and self._options.pedantic: + self._call_cmd(['gn', 'gen', gn_outdir]) + return 0 + + def cmd_list(self): + print '\n'.join(sorted(self._mbw.masters[self._options.master])) + return 0 + + def verbose_print_1(self, text): + if self._options.verbosity >= 1: + print '#' * 80 + print text + + def verbose_print_2(self, text): + if self._options.verbosity >= 2: + indent = ' ' * 2 + for l in text.splitlines(): + print indent + l + + def _call_cmd(self, args): + self.verbose_print_1(' '.join(args)) + try: + output = subprocess.check_output( + args=args, + stderr=subprocess.STDOUT, + ) + self.verbose_print_2(output) + except subprocess.CalledProcessError as e: + self.verbose_print_2(e.output) + raise + + def _find_work_dir(self, path): + """Find the closest v8 root to `path`.""" + if os.path.exists(os.path.join(path, 'tools', 'dev', 'v8gen.py')): + # Approximate the v8 root dir by a folder where this script exists + # in the expected place. + return path + elif os.path.dirname(path) == path: + raise Exception( + 'This appears to not be called from a recent v8 checkout') + else: + return self._find_work_dir(os.path.dirname(path)) + + @property + def _goma_dir(self): + return os.path.normpath(os.environ.get('GOMA_DIR') or GOMA_DEFAULT) + + @property + def _need_goma_dir(self): + return self._goma_dir != GOMA_DEFAULT + + @property + def _use_goma(self): + if self._options.goma is None: + # Auto-detect. + return os.path.exists(self._goma_dir) and os.path.isdir(self._goma_dir) + else: + return self._options.goma + + @property + def _goma_args(self): + """Gn args for using goma.""" + # Specify goma args if we want to use goma and if goma isn't specified + # via command line already. The command-line always has precedence over + # any other specification. 
+ if (self._use_goma and + not any(re.match(r'use_goma\s*=.*', x) for x in self._gn_args)): + if self._need_goma_dir: + return 'use_goma=true\ngoma_dir="%s"' % self._goma_dir + else: + return 'use_goma=true' + else: + return '' + + def _append_gn_args(self, type, gn_args_path, more_gn_args): + """Append extra gn arguments to the generated args.gn file.""" + if not more_gn_args: + return False + self.verbose_print_1('Appending """\n%s\n""" to %s.' % ( + more_gn_args, os.path.abspath(gn_args_path))) + with open(gn_args_path, 'a') as f: + f.write('\n# Additional %s args:\n' % type) + f.write(more_gn_args) + f.write('\n') + + # Artificially increment modification time as our modifications happen too + # fast. This makes sure that gn is properly rebuilding the ninja files. + mtime = os.path.getmtime(gn_args_path) + 1 + with open(gn_args_path, 'aw'): + os.utime(gn_args_path, (mtime, mtime)) + + return True + + def main(self): + # Always operate relative to the base directory for better relative-path + # handling. This script can be used in any v8 checkout. + workdir = self._find_work_dir(os.getcwd()) + if workdir != os.getcwd(): + self.verbose_print_1('cd ' + workdir) + os.chdir(workdir) + + # Initialize MB as a library. + self._mbw = mb.MetaBuildWrapper() + + # TODO(machenbach): Factor out common methods independent of mb arguments. 
+ self._mbw.ParseArgs(['lookup', '-f', CONFIG]) + self._mbw.ReadConfigFile() + + if not self._options.master in self._mbw.masters: + print '%s not found in %s\n' % (self._options.master, CONFIG) + print 'Choose one of:\n%s\n' % ( + '\n'.join(sorted(self._mbw.masters.keys()))) + return 1 + + return self._options.func() + + +if __name__ == "__main__": + gen = GenerateGnArgs(sys.argv[1:]) + try: + sys.exit(gen.main()) + except Exception: + if gen._options.verbosity < 2: + print ('\nHint: You can raise verbosity (-vv) to see the output of ' + 'failed commands.\n') + raise diff --git a/deps/v8/tools/dump-cpp.py b/deps/v8/tools/dump-cpp.py new file mode 100644 index 00000000000000..5198ecab21f3f9 --- /dev/null +++ b/deps/v8/tools/dump-cpp.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This script executes dumpcpp.js, collects all dumped C++ symbols, +# and merges them back into v8 log. 
+ +import os +import platform +import re +import subprocess +import sys + +def is_file_executable(fPath): + return os.path.isfile(fPath) and os.access(fPath, os.X_OK) + +if __name__ == '__main__': + JS_FILES = ['splaytree.js', 'codemap.js', 'csvparser.js', 'consarray.js', + 'profile.js', 'logreader.js', 'tickprocessor.js', 'SourceMap.js', + 'dumpcpp.js', 'dumpcpp-driver.js'] + tools_path = os.path.dirname(os.path.realpath(__file__)) + on_windows = platform.system() == 'Windows' + JS_FILES = [os.path.join(tools_path, f) for f in JS_FILES] + + args = [] + log_file = 'v8.log' + debug = False + for arg in sys.argv[1:]: + if arg == '--debug': + debug = True + continue + args.append(arg) + if not arg.startswith('-'): + log_file = arg + + if on_windows: + args.append('--windows') + + with open(log_file, 'r') as f: + lines = f.readlines() + + d8_line = re.search(',\"(.*d8)', ''.join(lines)) + if d8_line: + d8_exec = d8_line.group(1) + if not is_file_executable(d8_exec): + print 'd8 binary path found in {} is not executable.'.format(log_file) + sys.exit(-1) + else: + print 'No d8 binary path found in {}.'.format(log_file) + sys.exit(-1) + + args = [d8_exec] + JS_FILES + ['--'] + args + + with open(log_file) as f: + sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdin=f) + out, err = sp.communicate() + if debug: + print err + if sp.returncode != 0: + print out + exit(-1) + + if on_windows and out: + out = re.sub('\r+\n', '\n', out) + + is_written = not bool(out) + with open(log_file, 'w') as f: + for line in lines: + if not is_written and line.startswith('tick'): + f.write(out) + is_written = True + f.write(line) diff --git a/deps/v8/tools/dumpcpp-driver.js b/deps/v8/tools/dumpcpp-driver.js new file mode 100644 index 00000000000000..44527771e47b3a --- /dev/null +++ b/deps/v8/tools/dumpcpp-driver.js @@ -0,0 +1,45 @@ +// Copyright 2016 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Dump C++ symbols of shared library if possible + +function processArguments(args) { + var processor = new ArgumentsProcessor(args); + if (processor.parse()) { + return processor.result(); + } else { + processor.printUsageAndExit(); + } +} + +function initSourceMapSupport() { + // Pull dev tools source maps into our name space. + SourceMap = WebInspector.SourceMap; + + // Overwrite the load function to load scripts synchronously. + SourceMap.load = function(sourceMapURL) { + var content = readFile(sourceMapURL); + var sourceMapObject = (JSON.parse(content)); + return new SourceMap(sourceMapURL, sourceMapObject); + }; +} + +var entriesProviders = { + 'unix': UnixCppEntriesProvider, + 'windows': WindowsCppEntriesProvider, + 'mac': MacCppEntriesProvider +}; + +var params = processArguments(arguments); +var sourceMap = null; +if (params.sourceMap) { + initSourceMapSupport(); + sourceMap = SourceMap.load(params.sourceMap); +} + +var cppProcessor = new CppProcessor( + new (entriesProviders[params.platform])(params.nm, params.targetRootFS), + params.timedRange, params.pairwiseTimedRange); +cppProcessor.processLogFile(params.logFileName); +cppProcessor.dumpCppSymbols(); diff --git a/deps/v8/tools/dumpcpp.js b/deps/v8/tools/dumpcpp.js new file mode 100644 index 00000000000000..ca5ff675f325c5 --- /dev/null +++ b/deps/v8/tools/dumpcpp.js @@ -0,0 +1,58 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +function CppProcessor(cppEntriesProvider, timedRange, pairwiseTimedRange) { + LogReader.call(this, { + 'shared-library': { parsers: [null, parseInt, parseInt, parseInt], + processor: this.processSharedLibrary } + }, timedRange, pairwiseTimedRange); + + this.cppEntriesProvider_ = cppEntriesProvider; + this.codeMap_ = new CodeMap(); + this.lastLogFileName_ = null; +} +inherits(CppProcessor, LogReader); + +/** + * @override + */ +CppProcessor.prototype.printError = function(str) { + print(str); +}; + +CppProcessor.prototype.processLogFile = function(fileName) { + this.lastLogFileName_ = fileName; + var line; + while (line = readline()) { + this.processLogLine(line); + } +}; + +CppProcessor.prototype.processLogFileInTest = function(fileName) { + // Hack file name to avoid dealing with platform specifics. + this.lastLogFileName_ = 'v8.log'; + var contents = readFile(fileName); + this.processLogChunk(contents); +}; + +CppProcessor.prototype.processSharedLibrary = function( + name, startAddr, endAddr, aslrSlide) { + var self = this; + var libFuncs = this.cppEntriesProvider_.parseVmSymbols( + name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) { + var entry = new CodeMap.CodeEntry(fEnd - fStart, fName, 'CPP'); + self.codeMap_.addStaticCode(fStart, entry); + }); +}; + +CppProcessor.prototype.dumpCppSymbols = function() { + var staticEntries = this.codeMap_.getAllStaticEntriesWithAddresses(); + var total = staticEntries.length; + for (var i = 0; i < total; ++i) { + var entry = staticEntries[i]; + var printValues = ['cpp', '0x' + entry[0].toString(16), entry[1].size, + '"' + entry[1].name + '"']; + print(printValues.join(',')); + } +}; diff --git a/deps/v8/tools/eval_gc_nvp.py b/deps/v8/tools/eval_gc_nvp.py index fcb6d8b9a2232f..25afe8e4f0a74a 100755 --- a/deps/v8/tools/eval_gc_nvp.py +++ b/deps/v8/tools/eval_gc_nvp.py @@ -10,7 +10,7 @@ from argparse import ArgumentParser from copy import deepcopy from gc_nvp_common import split_nvp -from math import log 
+from math import ceil,log from sys import stdin @@ -74,11 +74,12 @@ def __str__(self): class Category: - def __init__(self, key, histogram, csv): + def __init__(self, key, histogram, csv, percentiles): self.key = key self.values = [] self.histogram = histogram self.csv = csv + self.percentiles = percentiles def process_entry(self, entry): if self.key in entry: @@ -100,6 +101,16 @@ def avg(self): def empty(self): return len(self.values) == 0 + def _compute_percentiles(self): + ret = [] + if len(self.values) == 0: + return ret + sorted_values = sorted(self.values) + for percentile in self.percentiles: + index = int(ceil((len(self.values) - 1) * percentile / 100)) + ret.append(" {0}%: {1}".format(percentile, sorted_values[index])) + return ret + def __str__(self): if self.csv: ret = [self.key] @@ -118,6 +129,8 @@ def __str__(self): ret.append(" avg: {0}".format(self.avg())) if self.histogram: ret.append(str(self.histogram)) + if self.percentiles: + ret.append("\n".join(self._compute_percentiles())) return "\n".join(ret) def __repr__(self): @@ -160,6 +173,9 @@ def main(): help="rank keys by metric (default: no)") parser.add_argument('--csv', dest='csv', action='store_true', help='provide output as csv') + parser.add_argument('--percentiles', dest='percentiles', + type=str, default="", + help='comma separated list of percentiles') args = parser.parse_args() histogram = None @@ -171,7 +187,14 @@ def main(): bucket_trait = LinearBucket(args.linear_histogram_granularity) histogram = Histogram(bucket_trait, not args.histogram_omit_empty) - categories = [ Category(key, deepcopy(histogram), args.csv) + percentiles = [] + for percentile in args.percentiles.split(','): + try: + percentiles.append(float(percentile)) + except ValueError: + pass + + categories = [ Category(key, deepcopy(histogram), args.csv, percentiles) for key in args.keys ] while True: diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh index ceb4db54cb4606..140165da438dfa 100755 --- 
a/deps/v8/tools/eval_gc_time.sh +++ b/deps/v8/tools/eval_gc_time.sh @@ -17,6 +17,7 @@ print_usage_and_die() { echo " -c|--csv provide csv output" echo " -f|--file FILE profile input in a file" echo " (default: stdin)" + echo " -p|--percentiles comma separated percentiles" exit 1 } @@ -25,6 +26,7 @@ RANK_MODE=max TOP_LEVEL=no CSV="" LOGFILE=/dev/stdin +PERCENTILES="" while [[ $# -ge 1 ]] do @@ -60,6 +62,10 @@ do LOGFILE=$2 shift ;; + -p|--percentiles) + PERCENTILES="--percentiles=$2" + shift + ;; *) break ;; @@ -98,7 +104,6 @@ INTERESTING_OLD_GEN_KEYS="\ evacuate.clean_up \ evacuate.copy \ evacuate.update_pointers \ - evacuate.update_pointers.between_evacuated \ evacuate.update_pointers.to_evacuated \ evacuate.update_pointers.to_new \ evacuate.update_pointers.weak \ @@ -145,6 +150,7 @@ case $OP in --no-histogram \ --rank $RANK_MODE \ $CSV \ + $PERCENTILES \ ${INTERESTING_NEW_GEN_KEYS} ;; old-gen-rank) @@ -153,6 +159,7 @@ case $OP in --no-histogram \ --rank $RANK_MODE \ $CSV \ + $PERCENTILES \ ${INTERESTING_OLD_GEN_KEYS} ;; *) diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua index 82ea4e02951698..42cb2e370bd9cb 100644 --- a/deps/v8/tools/gcmole/gcmole.lua +++ b/deps/v8/tools/gcmole/gcmole.lua @@ -183,27 +183,54 @@ end ------------------------------------------------------------------------------- -- GYP file parsing +-- TODO(machenbach): Remove this when deprecating gyp. local function ParseGYPFile() - local gyp = "" - local gyp_files = { "tools/gyp/v8.gyp", "test/cctest/cctest.gyp" } + local result = {} + local gyp_files = { + { "src/v8.gyp", "'([^']-%.cc)'", "src/" }, + { "test/cctest/cctest.gyp", "'(test-[^']-%.cc)'", "test/cctest/" } + } + for i = 1, #gyp_files do - local f = assert(io.open(gyp_files[i]), "failed to open GYP file") - local t = f:read('*a') - gyp = gyp .. 
t - f:close() + local filename = gyp_files[i][1] + local pattern = gyp_files[i][2] + local prefix = gyp_files[i][3] + local gyp_file = assert(io.open(filename), "failed to open GYP file") + local gyp = gyp_file:read('*a') + for condition, sources in + gyp:gmatch "%[.-### gcmole%((.-)%) ###(.-)%]" do + if result[condition] == nil then result[condition] = {} end + for file in sources:gmatch(pattern) do + table.insert(result[condition], prefix .. file) + end + end + gyp_file:close() end - local result = {} + return result +end - for condition, sources in - gyp:gmatch "'sources': %[.-### gcmole%((.-)%) ###(.-)%]" do - if result[condition] == nil then result[condition] = {} end - for file in sources:gmatch "'%.%./%.%./src/([^']-%.cc)'" do - table.insert(result[condition], "src/" .. file) - end - for file in sources:gmatch "'(test-[^']-%.cc)'" do - table.insert(result[condition], "test/cctest/" .. file) +local function ParseGNFile() + local result = {} + local gn_files = { + { "BUILD.gn", '"([^"]-%.cc)"', "" }, + { "test/cctest/BUILD.gn", '"(test-[^"]-%.cc)"', "test/cctest/" } + } + + for i = 1, #gn_files do + local filename = gn_files[i][1] + local pattern = gn_files[i][2] + local prefix = gn_files[i][3] + local gn_file = assert(io.open(filename), "failed to open GN file") + local gn = gn_file:read('*a') + for condition, sources in + gn:gmatch "### gcmole%((.-)%) ###(.-)%]" do + if result[condition] == nil then result[condition] = {} end + for file in sources:gmatch(pattern) do + table.insert(result[condition], prefix .. file) + end end + gn_file:close() end return result @@ -230,13 +257,40 @@ local function BuildFileList(sources, props) return list end -local sources = ParseGYPFile() + +local gyp_sources = ParseGYPFile() +local gn_sources = ParseGNFile() + +-- TODO(machenbach): Remove this comparison logic when deprecating gyp. 
+local function CompareSources(sources1, sources2, what) + for condition, files1 in pairs(sources1) do + local files2 = sources2[condition] + assert( + files2 ~= nil, + "Missing gcmole condition in " .. what .. ": " .. condition) + + -- Turn into set for speed. + files2_set = {} + for i, file in pairs(files2) do files2_set[file] = true end + + for i, file in pairs(files1) do + assert( + files2_set[file] ~= nil, + "Missing file " .. file .. " in " .. what .. " for condition " .. + condition) + end + end +end + +CompareSources(gyp_sources, gn_sources, "GN") +CompareSources(gn_sources, gyp_sources, "GYP") + local function FilesForArch(arch) - return BuildFileList(sources, { os = 'linux', - arch = arch, - mode = 'debug', - simulator = ''}) + return BuildFileList(gn_sources, { os = 'linux', + arch = arch, + mode = 'debug', + simulator = ''}) end local mtConfig = {} diff --git a/deps/v8/tools/gcmole/run-gcmole.isolate b/deps/v8/tools/gcmole/run-gcmole.isolate index df6e9a267f2563..0fba2a12c1cd4e 100644 --- a/deps/v8/tools/gcmole/run-gcmole.isolate +++ b/deps/v8/tools/gcmole/run-gcmole.isolate @@ -12,11 +12,12 @@ 'parallel.py', 'run-gcmole.py', # The following contains all relevant source and gyp files. 
- '../gyp/v8.gyp', + '../../BUILD.gn', '../../base/', '../../include/', '../../src/', '../../test/cctest/', + '../../testing/gtest/include/gtest/gtest_prod.h', '../../third_party/icu/source/', ], }, diff --git a/deps/v8/tools/gcmole/run_gcmole.gyp b/deps/v8/tools/gcmole/run_gcmole.gyp index 9d13f7606a5de5..7d206bf412539d 100644 --- a/deps/v8/tools/gcmole/run_gcmole.gyp +++ b/deps/v8/tools/gcmole/run_gcmole.gyp @@ -10,8 +10,8 @@ 'target_name': 'run_gcmole_run', 'type': 'none', 'includes': [ - '../../build/features.gypi', - '../../build/isolate.gypi', + '../../gypfiles/features.gypi', + '../../gypfiles/isolate.gypi', ], 'sources': [ 'run-gcmole.isolate', diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py index 5d26146fc7e10b..99616727e3c4ab 100644 --- a/deps/v8/tools/gdb-v8-support.py +++ b/deps/v8/tools/gdb-v8-support.py @@ -167,7 +167,7 @@ def find (self, startAddr, endAddr, value): "find 0x%s, 0x%s, %s" % (startAddr, endAddr, value), to_string = True) if result.find("not found") == -1: - print result + print(result) except: pass diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit index 5e6af9d6a89ed0..1eae053f2ce031 100644 --- a/deps/v8/tools/gdbinit +++ b/deps/v8/tools/gdbinit @@ -4,7 +4,7 @@ # Print HeapObjects. define job -print ((v8::internal::HeapObject*)($arg0))->Print() +call _v8_internal_Print_Object((void*)($arg0)) end document job Print a v8 JavaScript object @@ -13,7 +13,7 @@ end # Print Code objects containing given PC. define jco -job (v8::internal::Isolate::Current()->FindCodeObject((v8::internal::Address)$arg0)) +call _v8_internal_Print_Code((void*)($arg0)) end document jco Print a v8 Code object from an internal code address @@ -22,7 +22,7 @@ end # Print TypeFeedbackVector define jfv -print ((v8::internal::TypeFeedbackVector*)($arg0))->Print() +call _v8_internal_Print_TypeFeedbackVector((void*)($arg0)) end document jfv Print a v8 TypeFeedbackVector object @@ -31,7 +31,7 @@ end # Print DescriptorArray. 
define jda -print ((v8::internal::DescriptorArray*)($arg0))->Print() +call _v8_internal_Print_DescriptorArray((void*)($arg0)) end document jda Print a v8 DescriptorArray object @@ -40,7 +40,7 @@ end # Print TransitionArray. define jta -print ((v8::internal::TransitionArray*)($arg0))->Print() +call _v8_internal_Print_TransitionArray((void*)($arg0)) end document jta Print a v8 TransitionArray object @@ -49,12 +49,24 @@ end # Print JavaScript stack trace. define jst -print v8::internal::Isolate::Current()->PrintStack((FILE*) stdout, 1) +call _v8_internal_Print_StackTrace() end document jst Print the current JavaScript stack trace Usage: jst end +# Skip the JavaScript stack. +define jss +set $js_entry_sp=v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_ +set $rbp=*(void**)$js_entry_sp +set $rsp=$js_entry_sp + 2*sizeof(void*) +set $pc=*(void**)($js_entry_sp+sizeof(void*)) +end +document jss +Skip the jitted stack on x64 to where we entered JS last. +Usage: jss +end + set disassembly-flavor intel set disable-randomization off diff --git a/deps/v8/tools/gen-inlining-tests.py b/deps/v8/tools/gen-inlining-tests.py new file mode 100644 index 00000000000000..1a377e61edb2ee --- /dev/null +++ b/deps/v8/tools/gen-inlining-tests.py @@ -0,0 +1,566 @@ +#!/usr/bin/env python3 + +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +from collections import namedtuple +import textwrap +import sys + +SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js" +# Generates 2 files. Found by trial and error. +SHARD_SIZE = 97 + +PREAMBLE = """ + +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax --turbo --no-always-opt + +// This test file was generated by tools/gen-inlining-tests.py . 
+ +// Global variables +var deopt = undefined; // either true or false +var counter = 0; + +function resetState() { + counter = 0; +} + +function warmUp(f) { + try { + f(); + } catch (ex) { + // ok + } + try { + f(); + } catch (ex) { + // ok + } +} + +function resetOptAndAssertResultEquals(expected, f) { + warmUp(f); + resetState(); + // %DebugPrint(f); + eval("'dont optimize this function itself please, but do optimize f'"); + %OptimizeFunctionOnNextCall(f); + assertEquals(expected, f()); +} + +function resetOptAndAssertThrowsWith(expected, f) { + warmUp(f); + resetState(); + // %DebugPrint(f); + eval("'dont optimize this function itself please, but do optimize f'"); + %OptimizeFunctionOnNextCall(f); + try { + var result = f(); + fail("resetOptAndAssertThrowsWith", + "exception: " + expected, + "result: " + result); + } catch (ex) { + assertEquals(expected, ex); + } +} + +function increaseAndReturn15() { + if (deopt) %DeoptimizeFunction(f); + counter++; + return 15; +} + +function increaseAndThrow42() { + if (deopt) %DeoptimizeFunction(f); + counter++; + throw 42; +} + +function increaseAndReturn15_noopt_inner() { + if (deopt) %DeoptimizeFunction(f); + counter++; + return 15; +} + +%NeverOptimizeFunction(increaseAndReturn15_noopt_inner); + +function increaseAndThrow42_noopt_inner() { + if (deopt) %DeoptimizeFunction(f); + counter++; + throw 42; +} + +%NeverOptimizeFunction(increaseAndThrow42_noopt_inner); + +// Alternative 1 + +function returnOrThrow(doReturn) { + if (doReturn) { + return increaseAndReturn15(); + } else { + return increaseAndThrow42(); + } +} + +// Alternative 2 + +function increaseAndReturn15_calls_noopt() { + return increaseAndReturn15_noopt_inner(); +} + +function increaseAndThrow42_calls_noopt() { + return increaseAndThrow42_noopt_inner(); +} + +// Alternative 3. +// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts +// as the other one. 
+function invertFunctionCall(f) { + var result; + try { + result = f(); + } catch (ex) { + return ex - 27; + } + throw result + 27; +} + +// Alternative 4: constructor +function increaseAndStore15Constructor() { + if (deopt) %DeoptimizeFunction(f); + ++counter; + this.x = 15; +} + +function increaseAndThrow42Constructor() { + if (deopt) %DeoptimizeFunction(f); + ++counter; + this.x = 42; + throw this.x; +} + +// Alternative 5: property +var magic = {}; +Object.defineProperty(magic, 'prop', { + get: function () { + if (deopt) %DeoptimizeFunction(f); + return 15 + 0 * ++counter; + }, + + set: function(x) { + // argument should be 37 + if (deopt) %DeoptimizeFunction(f); + counter -= 36 - x; // increments counter + throw 42; + } +}) + +// Generate type feedback. + +assertEquals(15, increaseAndReturn15_calls_noopt()); +assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42); + +assertEquals(15, (new increaseAndStore15Constructor()).x); +assertThrowsEquals(function() { + return (new increaseAndThrow42Constructor()).x; + }, + 42); + +function runThisShard() { + +""".strip() + +def booltuples(n): + """booltuples(2) yields 4 tuples: (False, False), (False, True), + (True, False), (True, True).""" + + assert isinstance(n, int) + if n <= 0: + yield () + else: + for initial in booltuples(n-1): + yield initial + (False,) + yield initial + (True,) + +def fnname(flags): + assert len(FLAGLETTERS) == len(flags) + + return "f_" + ''.join( + FLAGLETTERS[i] if b else '_' + for (i, b) in enumerate(flags)) + +NUM_TESTS_PRINTED = 0 +NUM_TESTS_IN_SHARD = 0 + +def printtest(flags): + """Print a test case. Takes a couple of boolean flags, on which the + printed Javascript code depends.""" + + assert all(isinstance(flag, bool) for flag in flags) + + # The alternative flags are in reverse order so that if we take all possible + # tuples, ordered lexicographically from false to true, we get first the + # default, then alternative 1, then 2, etc. 
+ ( + alternativeFn5, # use alternative #5 for returning/throwing: + # return/throw using property + alternativeFn4, # use alternative #4 for returning/throwing: + # return/throw using constructor + alternativeFn3, # use alternative #3 for returning/throwing: + # return/throw indirectly, based on function argument + alternativeFn2, # use alternative #2 for returning/throwing: + # return/throw indirectly in unoptimized code, + # no branching + alternativeFn1, # use alternative #1 for returning/throwing: + # return/throw indirectly, based on boolean arg + tryThrows, # in try block, call throwing function + tryReturns, # in try block, call returning function + tryFirstReturns, # in try block, returning goes before throwing + tryResultToLocal, # in try block, result goes to local variable + doCatch, # include catch block + catchReturns, # in catch block, return + catchWithLocal, # in catch block, modify or return the local variable + catchThrows, # in catch block, throw + doFinally, # include finally block + finallyReturns, # in finally block, return local variable + finallyThrows, # in finally block, throw + endReturnLocal, # at very end, return variable local + deopt, # deopt inside inlined function + ) = flags + + # BASIC RULES + + # Only one alternative can be applied at any time. + if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4 + + alternativeFn5 > 1): + return + + # In try, return or throw, or both. + if not (tryReturns or tryThrows): return + + # Either doCatch or doFinally. + if not doCatch and not doFinally: return + + # Catch flags only make sense when catching + if not doCatch and (catchReturns or catchWithLocal or catchThrows): + return + + # Finally flags only make sense when finallying + if not doFinally and (finallyReturns or finallyThrows): + return + + # tryFirstReturns is only relevant when both tryReturns and tryThrows are + # true. 
+ if tryFirstReturns and not (tryReturns and tryThrows): return + + # From the try and finally block, we can return or throw, but not both. + if catchReturns and catchThrows: return + if finallyReturns and finallyThrows: return + + # If at the end we return the local, we need to have touched it. + if endReturnLocal and not (tryResultToLocal or catchWithLocal): return + + # PRUNING + + anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3, + alternativeFn4, alternativeFn5]) + specificAlternative = any([alternativeFn2, alternativeFn3]) + rareAlternative = not specificAlternative + + # If try returns and throws, then don't catchWithLocal, endReturnLocal, or + # deopt, or do any alternative. + if (tryReturns and tryThrows and + (catchWithLocal or endReturnLocal or deopt or anyAlternative)): + return + # We don't do any alternative if we do a finally. + if doFinally and anyAlternative: return + # We only use the local variable if we do alternative #2 or #3. + if ((tryResultToLocal or catchWithLocal or endReturnLocal) and + not specificAlternative): + return + # We don't need to test deopting into a finally. + if doFinally and deopt: return + + # We're only interested in alternative #2 if we have endReturnLocal, no + # catchReturns, and no catchThrows, and deopt. + if (alternativeFn2 and + (not endReturnLocal or catchReturns or catchThrows or not deopt)): + return + + + # Flag check succeeded. 
+ + trueFlagNames = [name for (name, value) in flags._asdict().items() if value] + flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames)) + write(textwrap.fill(flagsMsgLine, subsequent_indent=' // ')) + write("") + + if not anyAlternative: + fragments = { + 'increaseAndReturn15': 'increaseAndReturn15()', + 'increaseAndThrow42': 'increaseAndThrow42()', + } + elif alternativeFn1: + fragments = { + 'increaseAndReturn15': 'returnOrThrow(true)', + 'increaseAndThrow42': 'returnOrThrow(false)', + } + elif alternativeFn2: + fragments = { + 'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()', + 'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()', + } + elif alternativeFn3: + fragments = { + 'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)', + 'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)', + } + elif alternativeFn4: + fragments = { + 'increaseAndReturn15': '(new increaseAndStore15Constructor()).x', + 'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x', + } + else: + assert alternativeFn5 + fragments = { + 'increaseAndReturn15': 'magic.prop /* returns 15 */', + 'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)', + } + + # As we print code, we also maintain what the result should be. Variable + # {result} can be one of three things: + # + # - None, indicating returning JS null + # - ("return", n) with n an integer + # - ("throw", n), with n an integer + + result = None + # We also maintain what the counter should be at the end. + # The counter is reset just before f is called. 
+ counter = 0 + + write( " f = function {} () {{".format(fnname(flags))) + write( " var local = 888;") + write( " deopt = {};".format("true" if deopt else "false")) + local = 888 + write( " try {") + write( " counter++;") + counter += 1 + resultTo = "local +=" if tryResultToLocal else "return" + if tryReturns and not (tryThrows and not tryFirstReturns): + write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments)) + if result == None: + counter += 1 + if tryResultToLocal: + local += 19 + else: + result = ("return", 19) + if tryThrows: + write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments)) + if result == None: + counter += 1 + result = ("throw", 42) + if tryReturns and tryThrows and not tryFirstReturns: + write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments)) + if result == None: + counter += 1 + if tryResultToLocal: + local += 19 + else: + result = ("return", 19) + write( " counter++;") + if result == None: + counter += 1 + + if doCatch: + write( " } catch (ex) {") + write( " counter++;") + if isinstance(result, tuple) and result[0] == 'throw': + counter += 1 + if catchThrows: + write(" throw 2 + ex;") + if isinstance(result, tuple) and result[0] == "throw": + result = ('throw', 2 + result[1]) + elif catchReturns and catchWithLocal: + write(" return 2 + local;") + if isinstance(result, tuple) and result[0] == "throw": + result = ('return', 2 + local) + elif catchReturns and not catchWithLocal: + write(" return 2 + ex;"); + if isinstance(result, tuple) and result[0] == "throw": + result = ('return', 2 + result[1]) + elif catchWithLocal: + write(" local += ex;"); + if isinstance(result, tuple) and result[0] == "throw": + local += result[1] + result = None + counter += 1 + else: + if isinstance(result, tuple) and result[0] == "throw": + result = None + counter += 1 + write( " counter++;") + + if doFinally: + write( " } finally {") + write( " counter++;") + counter += 1 + if finallyThrows: + write(" throw 25;") + result = 
('throw', 25) + elif finallyReturns: + write(" return 3 + local;") + result = ('return', 3 + local) + elif not finallyReturns and not finallyThrows: + write(" local += 2;") + local += 2 + counter += 1 + else: assert False # unreachable + write( " counter++;") + + write( " }") + write( " counter++;") + if result == None: + counter += 1 + if endReturnLocal: + write( " return 5 + local;") + if result == None: + result = ('return', 5 + local) + write( " }") + + if result == None: + write( " resetOptAndAssertResultEquals(undefined, f);") + else: + tag, value = result + if tag == "return": + write( " resetOptAndAssertResultEquals({}, f);".format(value)) + else: + assert tag == "throw" + write( " resetOptAndAssertThrowsWith({}, f);".format(value)) + + write( " assertEquals({}, counter);".format(counter)) + write( "") + + global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD + NUM_TESTS_PRINTED += 1 + NUM_TESTS_IN_SHARD += 1 + +FILE = None # to be initialised to an open file +SHARD_NUM = 1 + +def write(*args): + return print(*args, file=FILE) + + + +def rotateshard(): + global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE + if MODE != 'shard': + return + if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE: + return + if FILE != None: + finishshard() + assert FILE == None + FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w') + write_shard_header() + NUM_TESTS_IN_SHARD = 0 + +def finishshard(): + global FILE, SHARD_NUM, MODE + assert FILE + write_shard_footer() + if MODE == 'shard': + print("Wrote shard {}.".format(SHARD_NUM)) + FILE.close() + FILE = None + SHARD_NUM += 1 + + +def write_shard_header(): + if MODE == 'shard': + write("// Shard {}.".format(SHARD_NUM)) + write("") + write(PREAMBLE) + write("") + +def write_shard_footer(): + write("}") + write("%NeverOptimizeFunction(runThisShard);") + write("") + write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD)) + write("// {} tests up to here.".format(NUM_TESTS_PRINTED)) + write("") + write("runThisShard();") + 
+FLAGLETTERS="54321trflcrltfrtld" + +flagtuple = namedtuple('flagtuple', ( + "alternativeFn5", + "alternativeFn4", + "alternativeFn3", + "alternativeFn2", + "alternativeFn1", + "tryThrows", + "tryReturns", + "tryFirstReturns", + "tryResultToLocal", + "doCatch", + "catchReturns", + "catchWithLocal", + "catchThrows", + "doFinally", + "finallyReturns", + "finallyThrows", + "endReturnLocal", + "deopt" + )) + +emptyflags = flagtuple(*((False,) * len(flagtuple._fields))) +f1 = emptyflags._replace(tryReturns=True, doCatch=True) + +# You can test function printtest with f1. + +allFlagCombinations = [ + flagtuple(*bools) + for bools in booltuples(len(flagtuple._fields)) +] + +if __name__ == '__main__': + global MODE + if sys.argv[1:] == []: + MODE = 'stdout' + print("// Printing all shards together to stdout.") + print("") + write_shard_header() + FILE = sys.stdout + elif sys.argv[1:] == ['--shard-and-overwrite']: + MODE = 'shard' + else: + print("Usage:") + print("") + print(" python {}".format(sys.argv[0])) + print(" print all tests to standard output") + print(" python {} --shard-and-overwrite".format(sys.argv[0])) + print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE)) + + print("") + print(sys.argv[1:]) + print("") + sys.exit(1) + + rotateshard() + + for flags in allFlagCombinations: + printtest(flags) + rotateshard() + + finishshard() + + if MODE == 'shard': + print("Total: {} tests.".format(NUM_TESTS_PRINTED)) diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py index a0afc06ab9ed6b..5fd39f3b8541ea 100644 --- a/deps/v8/tools/gen-postmortem-metadata.py +++ b/deps/v8/tools/gen-postmortem-metadata.py @@ -50,7 +50,8 @@ import sys # -# Miscellaneous constants, tags, and masks used for object identification. +# Miscellaneous constants such as tags and masks used for object identification, +# enumeration values used as indexes in internal tables, etc.. 
# consts_misc = [ { 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' }, @@ -92,6 +93,8 @@ 'value': 'DescriptorArray::kFirstIndex' }, { 'name': 'prop_type_field', 'value': 'DATA' }, + { 'name': 'prop_type_const_field', + 'value': 'DATA_CONSTANT' }, { 'name': 'prop_type_mask', 'value': 'PropertyDetails::TypeField::kMask' }, { 'name': 'prop_index_mask', @@ -165,8 +168,6 @@ 'value': 'ScopeInfo::kStackLocalCount' }, { 'name': 'scopeinfo_idx_ncontextlocals', 'value': 'ScopeInfo::kContextLocalCount' }, - { 'name': 'scopeinfo_idx_ncontextglobals', - 'value': 'ScopeInfo::kContextGlobalCount' }, { 'name': 'scopeinfo_idx_first_vars', 'value': 'ScopeInfo::kVariablePartIndex' }, @@ -179,17 +180,49 @@ 'value': 'JSArrayBuffer::WasNeutered::kMask' }, { 'name': 'jsarray_buffer_was_neutered_shift', 'value': 'JSArrayBuffer::WasNeutered::kShift' }, + + { 'name': 'context_idx_closure', + 'value': 'Context::CLOSURE_INDEX' }, + { 'name': 'context_idx_native', + 'value': 'Context::NATIVE_CONTEXT_INDEX' }, + { 'name': 'context_idx_prev', + 'value': 'Context::PREVIOUS_INDEX' }, + { 'name': 'context_idx_ext', + 'value': 'Context::EXTENSION_INDEX' }, + { 'name': 'context_min_slots', + 'value': 'Context::MIN_CONTEXT_SLOTS' }, + + { 'name': 'namedictionaryshape_prefix_size', + 'value': 'NameDictionaryShape::kPrefixSize' }, + { 'name': 'namedictionaryshape_entry_size', + 'value': 'NameDictionaryShape::kEntrySize' }, + { 'name': 'globaldictionaryshape_entry_size', + 'value': 'GlobalDictionaryShape::kEntrySize' }, + + { 'name': 'namedictionary_prefix_start_index', + 'value': 'NameDictionary::kPrefixStartIndex' }, + + { 'name': 'seedednumberdictionaryshape_prefix_size', + 'value': 'SeededNumberDictionaryShape::kPrefixSize' }, + { 'name': 'seedednumberdictionaryshape_entry_size', + 'value': 'SeededNumberDictionaryShape::kEntrySize' }, + + { 'name': 'unseedednumberdictionaryshape_prefix_size', + 'value': 'UnseededNumberDictionaryShape::kPrefixSize' }, + { 'name': 
'unseedednumberdictionaryshape_entry_size', + 'value': 'UnseededNumberDictionaryShape::kEntrySize' } ]; # # The following useful fields are missing accessors, so we define fake ones. +# Please note that extra accessors should _only_ be added to expose offsets that +# can be used to access actual V8 objects' properties. They should not be added +# for exposing other values. For instance, enumeration values or class' +# constants should be exposed by adding an entry in the "consts_misc" table, not +# in this "extras_accessors" table. # extras_accessors = [ 'JSFunction, context, Context, kContextOffset', - 'Context, closure_index, int, CLOSURE_INDEX', - 'Context, native_context_index, int, NATIVE_CONTEXT_INDEX', - 'Context, previous_index, int, PREVIOUS_INDEX', - 'Context, min_context_slots, int, MIN_CONTEXT_SLOTS', 'HeapObject, map, Map, kMapOffset', 'JSObject, elements, Object, kElementsOffset', 'FixedArray, data, uintptr_t, kHeaderSize', @@ -203,12 +236,6 @@ 'Map, bit_field2, char, kBitField2Offset', 'Map, bit_field3, int, kBitField3Offset', 'Map, prototype, Object, kPrototypeOffset', - 'NameDictionaryShape, prefix_size, int, kPrefixSize', - 'NameDictionaryShape, entry_size, int, kEntrySize', - 'NameDictionary, prefix_start_index, int, kPrefixStartIndex', - 'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize', - 'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize', - 'NumberDictionaryShape, entry_size, int, kEntrySize', 'Oddball, kind_offset, int, kKindOffset', 'HeapNumber, value, double, kValueOffset', 'ConsString, first, String, kFirstOffset', @@ -252,6 +279,7 @@ #include "src/v8.h" #include "src/frames.h" #include "src/frames-inl.h" /* for architecture-specific frame constants */ +#include "src/contexts.h" using namespace v8::internal; diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py index ab8f3265a6efe7..6faadcde601a59 100755 --- a/deps/v8/tools/grokdump.py +++ b/deps/v8/tools/grokdump.py @@ -1745,10 +1745,12 @@ def 
color_addresses(self): frame_pointer = self.reader.ExceptionFP() self.styles[frame_pointer] = "frame" for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()): - self.styles[slot] = "stackaddress" + # stack address + self.styles[slot] = "sa" for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()): maybe_address = self.reader.ReadUIntPtr(slot) - self.styles[maybe_address] = "stackval" + # stack value + self.styles[maybe_address] = "sv" if slot == frame_pointer: self.styles[slot] = "frame" frame_pointer = maybe_address @@ -1760,7 +1762,7 @@ def get_style_class(self, address): def get_style_class_string(self, address): style = self.get_style_class(address) if style != None: - return " class=\"%s\" " % style + return " class=%s " % style else: return "" @@ -1875,11 +1877,13 @@ def PrintKnowledge(self): .dmptable { border-collapse : collapse; border-spacing : 0px; + table-layout: fixed; } .codedump { border-collapse : collapse; border-spacing : 0px; + table-layout: fixed; } .addrcomments { @@ -1932,11 +1936,11 @@ def PrintKnowledge(self): background-color : cyan; } -.stackaddress { +.stackaddress, .sa { background-color : LightGray; } -.stackval { +.stackval, .sv { background-color : LightCyan; } @@ -1944,16 +1948,17 @@ def PrintKnowledge(self): background-color : cyan; } -.commentinput { +.commentinput, .ci { width : 20em; } -a.nodump:visited { +/* a.nodump */ +a.nd:visited { color : black; text-decoration : none; } -a.nodump:link { +a.nd:link { color : black; text-decoration : none; } @@ -1984,6 +1989,7 @@ def PrintKnowledge(self): send_comment(s.substring(index + address_len), event.srcElement.value); } } +var c = comment; function send_comment(address, comment) { xmlhttp = new XMLHttpRequest(); @@ -2038,7 +2044,7 @@ def PrintKnowledge(self):
- -

Result

+ Your IC-Explorer. +

Usage

Run your script with --trace_ic and upload on this page:
+ /path/to/d8 --trace_ic your_script.js > trace.txt +

Data

+

+ trace + entries: 0 +

+
+

Result

+

Group-Key: -

-

- - - -
-

- +

+

+ + + +
+

+ + diff --git a/deps/v8/tools/ignition/bytecode_dispatches_report.py b/deps/v8/tools/ignition/bytecode_dispatches_report.py new file mode 100755 index 00000000000000..97f8e8394d3ef7 --- /dev/null +++ b/deps/v8/tools/ignition/bytecode_dispatches_report.py @@ -0,0 +1,281 @@ +#! /usr/bin/python +# +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# + +import argparse +import heapq +import json +from matplotlib import colors +from matplotlib import pyplot +import numpy +import struct +import sys + + +__DESCRIPTION = """ +Process v8.ignition_dispatches_counters.json and list top counters, +or plot a dispatch heatmap. + +Please note that those handlers that may not or will never dispatch +(e.g. Return or Throw) do not show up in the results. +""" + + +__HELP_EPILOGUE = """ +examples: + # Print the hottest bytecodes in descending order, reading from + # default filename v8.ignition_dispatches_counters.json (default mode) + $ tools/ignition/bytecode_dispatches_report.py + + # Print the hottest 15 bytecode dispatch pairs reading from data.json + $ tools/ignition/bytecode_dispatches_report.py -t -n 15 data.json + + # Save heatmap to default filename v8.ignition_dispatches_counters.svg + $ tools/ignition/bytecode_dispatches_report.py -p + + # Save heatmap to filename data.svg + $ tools/ignition/bytecode_dispatches_report.py -p -o data.svg + + # Open the heatmap in an interactive viewer + $ tools/ignition/bytecode_dispatches_report.py -p -i + + # Display the top 5 sources and destinations of dispatches to/from LdaZero + $ tools/ignition/bytecode_dispatches_report.py -f LdaZero -n 5 +""" + +__COUNTER_BITS = struct.calcsize("P") * 8 # Size in bits of a pointer +__COUNTER_MAX = 2**__COUNTER_BITS - 1 + + +def warn_if_counter_may_have_saturated(dispatches_table): + for source, counters_from_source in iteritems(dispatches_table): + for destination, counter in 
iteritems(counters_from_source): + if counter == __COUNTER_MAX: + print "WARNING: {} -> {} may have saturated.".format(source, + destination) + + +def find_top_bytecode_dispatch_pairs(dispatches_table, top_count): + def flattened_counters_generator(): + for source, counters_from_source in iteritems(dispatches_table): + for destination, counter in iteritems(counters_from_source): + yield source, destination, counter + + return heapq.nlargest(top_count, flattened_counters_generator(), + key=lambda x: x[2]) + + +def print_top_bytecode_dispatch_pairs(dispatches_table, top_count): + top_bytecode_dispatch_pairs = ( + find_top_bytecode_dispatch_pairs(dispatches_table, top_count)) + print "Top {} bytecode dispatch pairs:".format(top_count) + for source, destination, counter in top_bytecode_dispatch_pairs: + print "{:>12d}\t{} -> {}".format(counter, source, destination) + + +def find_top_bytecodes(dispatches_table): + top_bytecodes = [] + for bytecode, counters_from_bytecode in iteritems(dispatches_table): + top_bytecodes.append((bytecode, sum(itervalues(counters_from_bytecode)))) + + top_bytecodes.sort(key=lambda x: x[1], reverse=True) + return top_bytecodes + + +def print_top_bytecodes(dispatches_table): + top_bytecodes = find_top_bytecodes(dispatches_table) + print "Top bytecodes:" + for bytecode, counter in top_bytecodes: + print "{:>12d}\t{}".format(counter, bytecode) + + +def find_top_dispatch_sources_and_destinations( + dispatches_table, bytecode, top_count, sort_source_relative): + sources = [] + for source, destinations in iteritems(dispatches_table): + total = float(sum(itervalues(destinations))) + if bytecode in destinations: + count = destinations[bytecode] + sources.append((source, count, count / total)) + + destinations = [] + bytecode_destinations = dispatches_table[bytecode] + bytecode_total = float(sum(itervalues(bytecode_destinations))) + for destination, count in iteritems(bytecode_destinations): + destinations.append((destination, count, count / 
bytecode_total)) + + return (heapq.nlargest(top_count, sources, + key=lambda x: x[2 if sort_source_relative else 1]), + heapq.nlargest(top_count, destinations, key=lambda x: x[1])) + + +def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode, + top_count, sort_relative): + top_sources, top_destinations = find_top_dispatch_sources_and_destinations( + dispatches_table, bytecode, top_count, sort_relative) + print "Top sources of dispatches to {}:".format(bytecode) + for source_name, counter, ratio in top_sources: + print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name) + + print "\nTop destinations of dispatches from {}:".format(bytecode) + for destination_name, counter, ratio in top_destinations: + print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name) + + +def build_counters_matrix(dispatches_table): + labels = sorted(dispatches_table.keys()) + + counters_matrix = numpy.empty([len(labels), len(labels)], dtype=int) + for from_index, from_name in enumerate(labels): + current_row = dispatches_table[from_name]; + for to_index, to_name in enumerate(labels): + counters_matrix[from_index, to_index] = current_row.get(to_name, 0) + + # Reverse y axis for a nicer appearance + xlabels = labels + ylabels = list(reversed(xlabels)) + counters_matrix = numpy.flipud(counters_matrix) + + return counters_matrix, xlabels, ylabels + + +def plot_dispatches_table(dispatches_table, figure, axis): + counters_matrix, xlabels, ylabels = build_counters_matrix(dispatches_table) + + image = axis.pcolor( + counters_matrix, + cmap="jet", + norm=colors.LogNorm(), + edgecolor="grey", + linestyle="dotted", + linewidth=0.5 + ) + + axis.xaxis.set( + ticks=numpy.arange(0.5, len(xlabels)), + label="From bytecode handler" + ) + axis.xaxis.tick_top() + axis.set_xlim(0, len(xlabels)) + axis.set_xticklabels(xlabels, rotation="vertical") + + axis.yaxis.set( + ticks=numpy.arange(0.5, len(ylabels)), + label="To bytecode handler", + ticklabels=ylabels 
+ ) + axis.set_ylim(0, len(ylabels)) + + figure.colorbar( + image, + ax=axis, + fraction=0.01, + pad=0.01 + ) + + +def parse_command_line(): + command_line_parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__DESCRIPTION, + epilog=__HELP_EPILOGUE + ) + command_line_parser.add_argument( + "--plot-size", "-s", + metavar="N", + default=30, + help="shorter side in inches of the output plot (default 30)" + ) + command_line_parser.add_argument( + "--plot", "-p", + action="store_true", + help="plot dispatch pairs heatmap" + ) + command_line_parser.add_argument( + "--interactive", "-i", + action="store_true", + help="open the heatmap in an interactive viewer, instead of writing to file" + ) + command_line_parser.add_argument( + "--top-bytecode-dispatch-pairs", "-t", + action="store_true", + help="print the top bytecode dispatch pairs" + ) + command_line_parser.add_argument( + "--top-entries-count", "-n", + metavar="N", + type=int, + default=10, + help="print N top entries when running with -t or -f (default 10)" + ) + command_line_parser.add_argument( + "--top-dispatches-for-bytecode", "-f", + metavar="", + help="print top dispatch sources and destinations to the specified bytecode" + ) + command_line_parser.add_argument( + "--output-filename", "-o", + metavar="", + default="v8.ignition_dispatches_table.svg", + help=("file to save the plot file to. File type is deduced from the " + "extension. 
PDF, SVG, PNG supported") + ) + command_line_parser.add_argument( + "--sort-sources-relative", "-r", + action="store_true", + help=("print top sources in order to how often they dispatch to the " + "specified bytecode, only applied when using -f") + ) + command_line_parser.add_argument( + "input_filename", + metavar="", + default="v8.ignition_dispatches_table.json", + nargs='?', + help="Ignition counters JSON file" + ) + + return command_line_parser.parse_args() + + +def itervalues(d): + return d.values() if sys.version_info[0] > 2 else d.itervalues() + + +def iteritems(d): + return d.items() if sys.version_info[0] > 2 else d.iteritems() + + +def main(): + program_options = parse_command_line() + + with open(program_options.input_filename) as stream: + dispatches_table = json.load(stream) + + warn_if_counter_may_have_saturated(dispatches_table) + + if program_options.plot: + figure, axis = pyplot.subplots() + plot_dispatches_table(dispatches_table, figure, axis) + + if program_options.interactive: + pyplot.show() + else: + figure.set_size_inches(program_options.plot_size, + program_options.plot_size) + pyplot.savefig(program_options.output_filename) + elif program_options.top_bytecode_dispatch_pairs: + print_top_bytecode_dispatch_pairs( + dispatches_table, program_options.top_entries_count) + elif program_options.top_dispatches_for_bytecode: + print_top_dispatch_sources_and_destinations( + dispatches_table, program_options.top_dispatches_for_bytecode, + program_options.top_entries_count, program_options.sort_sources_relative) + else: + print_top_bytecodes(dispatches_table) + + +if __name__ == "__main__": + main() diff --git a/deps/v8/tools/ignition/bytecode_dispatches_report_test.py b/deps/v8/tools/ignition/bytecode_dispatches_report_test.py new file mode 100644 index 00000000000000..9be19e7f6312c4 --- /dev/null +++ b/deps/v8/tools/ignition/bytecode_dispatches_report_test.py @@ -0,0 +1,62 @@ +# Copyright 2016 the V8 project authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import bytecode_dispatches_report as bdr +import unittest + + +class BytecodeDispatchesReportTest(unittest.TestCase): + def test_find_top_counters(self): + top_counters = bdr.find_top_bytecode_dispatch_pairs({ + "a": {"a": 10, "b": 8, "c": 99}, + "b": {"a": 1, "b": 4, "c": 1}, + "c": {"a": 42, "b": 3, "c": 7}}, 5) + self.assertListEqual(top_counters, [ + ('a', 'c', 99), + ('c', 'a', 42), + ('a', 'a', 10), + ('a', 'b', 8), + ('c', 'c', 7)]) + + def test_build_counters_matrix(self): + counters_matrix, xlabels, ylabels = bdr.build_counters_matrix({ + "a": {"a": 10, "b": 8, "c": 7}, + "b": {"a": 1, "c": 4}, + "c": {"a": 42, "b": 12, "c": 99}}) + self.assertTrue((counters_matrix == [[42, 12, 99], + [ 1, 0, 4], + [10, 8, 7]]).all()) + self.assertListEqual(xlabels, ['a', 'b', 'c']) + self.assertListEqual(ylabels, ['c', 'b', 'a']) + + def test_find_top_bytecodes(self): + top_dispatch_sources = bdr.find_top_bytecodes({ + "a": {"a": 10, "b": 8, "c": 7}, + "b": {"a": 1, "c": 4}, + "c": {"a": 42, "b": 12, "c": 99} + }) + self.assertListEqual(top_dispatch_sources, [ + ('c', 153), + ('a', 25), + ('b', 5) + ]) + + def test_find_top_dispatch_sources_and_destinations(self): + d = { + "a": {"a": 4, "b": 2, "c": 4}, + "b": {"a": 1, "c": 4}, + "c": {"a": 40, "b": 10, "c": 50} + } + top_sources, top_dests = bdr.find_top_dispatch_sources_and_destinations( + d, "b", 10, False) + self.assertListEqual(top_sources, [ + ("c", 10, 0.1), + ("a", 2, 0.2) + ]) + top_sources, top_dests = bdr.find_top_dispatch_sources_and_destinations( + d, "b", 10, True) + self.assertListEqual(top_sources, [ + ("a", 2, 0.2), + ("c", 10, 0.1) + ]) diff --git a/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py b/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py new file mode 100755 index 00000000000000..6681190d9909a4 --- /dev/null +++ b/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py @@ -0,0 
+1,174 @@ +#! /usr/bin/python2 +# +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# + +import argparse +import collections +import os +import subprocess +import sys + + +__DESCRIPTION = """ +Processes a perf.data sample file and annotates the hottest instructions in a +given bytecode handler. +""" + + +__HELP_EPILOGUE = """ +Note: + This tool uses the disassembly of interpreter's bytecode handler codegen + from out/.debug/d8. you should ensure that this binary is in-sync with + the version used to generate the perf profile. + + Also, the tool depends on the symbol offsets from perf samples being accurate. + As such, you should use the ":pp" suffix for events. + +Examples: + EVENT_TYPE=cycles:pp tools/run-perf.sh out/x64.release/d8 + tools/ignition/linux_perf_bytecode_annotate.py Add +""" + + +def bytecode_offset_generator(perf_stream, bytecode_name): + skip_until_end_of_chain = False + bytecode_symbol = "BytecodeHandler:" + bytecode_name; + + for line in perf_stream: + # Lines starting with a "#" are comments, skip them. + if line[0] == "#": + continue + line = line.strip() + + # Empty line signals the end of the callchain. 
+ if not line: + skip_until_end_of_chain = False + continue + + if skip_until_end_of_chain: + continue + + symbol_and_offset = line.split(" ", 1)[1] + + if symbol_and_offset.startswith("BytecodeHandler:"): + skip_until_end_of_chain = True + + if symbol_and_offset.startswith(bytecode_symbol): + yield int(symbol_and_offset.split("+", 1)[1], 16) + + +def bytecode_offset_counts(bytecode_offsets): + offset_counts = collections.defaultdict(int) + for offset in bytecode_offsets: + offset_counts[offset] += 1 + return offset_counts + + +def bytecode_disassembly_generator(ignition_codegen, bytecode_name): + name_string = "name = " + bytecode_name + for line in ignition_codegen: + if line.startswith(name_string): + break + + # Found the bytecode disassembly. + for line in ignition_codegen: + line = line.strip() + # Blank line marks the end of the bytecode's disassembly. + if not line: + return + + # Only yield disassembly output. + if not line.startswith("0x"): + continue + + yield line + + +def print_disassembly_annotation(offset_counts, bytecode_disassembly): + total = sum(offset_counts.values()) + offsets = sorted(offset_counts, reverse=True) + def next_offset(): + return offsets.pop() if offsets else -1 + + current_offset = next_offset() + print current_offset; + + for line in bytecode_disassembly: + disassembly_offset = int(line.split()[1]) + if disassembly_offset == current_offset: + count = offset_counts[current_offset] + percentage = 100.0 * count / total + print "{:>8d} ({:>5.1f}%) ".format(count, percentage), + current_offset = next_offset() + else: + print " ", + print line + + if offsets: + print ("WARNING: Offsets not empty. 
Output is most likely invalid due to " + "a mismatch between perf output and debug d8 binary.") + + +def parse_command_line(): + command_line_parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__DESCRIPTION, + epilog=__HELP_EPILOGUE) + + command_line_parser.add_argument( + "--arch", "-a", + help="The architecture (default: x64)", + default="x64", + ) + command_line_parser.add_argument( + "--input", "-i", + help="perf sample file to process (default: perf.data)", + default="perf.data", + metavar="", + dest="perf_filename" + ) + command_line_parser.add_argument( + "--output", "-o", + help="output file name (stdout if omitted)", + type=argparse.FileType("wt"), + default=sys.stdout, + metavar="", + dest="output_stream" + ) + command_line_parser.add_argument( + "bytecode_name", + metavar="", + nargs="?", + help="The bytecode handler to annotate" + ) + + return command_line_parser.parse_args() + + +def main(): + program_options = parse_command_line() + perf = subprocess.Popen(["perf", "script", "-f", "ip,sym,symoff", + "-i", program_options.perf_filename], + stdout=subprocess.PIPE) + + v8_root_path = os.path.dirname(__file__) + "/../../" + d8_path = "{}/out/{}.debug/d8".format(v8_root_path, program_options.arch) + d8_codegen = subprocess.Popen([d8_path, "--ignition", + "--trace-ignition-codegen", "-e", "1"], + stdout=subprocess.PIPE) + + bytecode_offsets = bytecode_offset_generator( + perf.stdout, program_options.bytecode_name) + offset_counts = bytecode_offset_counts(bytecode_offsets) + + bytecode_disassembly = bytecode_disassembly_generator( + d8_codegen.stdout, program_options.bytecode_name) + + print_disassembly_annotation(offset_counts, bytecode_disassembly) + + +if __name__ == "__main__": + main() diff --git a/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py b/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py new file mode 100644 index 00000000000000..15abbeda08b704 --- /dev/null +++ 
b/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py @@ -0,0 +1,85 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import StringIO +import unittest +import linux_perf_bytecode_annotate as bytecode_annotate + + +PERF_SCRIPT_OUTPUT = """ +# This line is a comment +# This should be ignored too +# +# cdefab01 aRandomSymbol::Name(to, be, ignored) + + 00000000 firstSymbol + 00000123 secondSymbol + + 01234567 foo + abcdef76 BytecodeHandler:bar+0x12 + 76543210 baz + abcdef76 BytecodeHandler:bar+0x16 + 76543210 baz + + 01234567 foo + abcdef76 BytecodeHandler:foo+0x1 + 76543210 baz + abcdef76 BytecodeHandler:bar+0x2 + 76543210 bar + + abcdef76 BytecodeHandler:bar+0x19 + + abcdef76 BytecodeHandler:bar+0x12 + + abcdef76 BytecodeHandler:bar+0x12 +""" + + +D8_CODEGEN_OUTPUT = """ +kind = BYTECODE_HANDLER +name = foo +compiler = turbofan +Instructions (size = 3) +0x3101394a3c0 0 55 push rbp +0x3101394a3c1 1 ffe3 jmp rbx + +kind = BYTECODE_HANDLER +name = bar +compiler = turbofan +Instructions (size = 5) +0x3101394b3c0 0 55 push rbp +0x3101394b3c1 1 4883c428 REX.W addq rsp,0x28 +# Unexpected comment +0x3101394b3c5 5 ffe3 jmp rbx + +kind = BYTECODE_HANDLER +name = baz +compiler = turbofan +Instructions (size = 5) +0x3101394c3c0 0 55 push rbp +0x3101394c3c1 1 4883c428 REX.W addq rsp,0x28 +0x3101394c3c5 5 ffe3 jmp rbx +""" + + +class LinuxPerfBytecodeAnnotateTest(unittest.TestCase): + + def test_bytecode_offset_generator(self): + perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT) + offsets = list( + bytecode_annotate.bytecode_offset_generator(perf_stream, "bar")) + self.assertListEqual(offsets, [18, 25, 18, 18]) + + def test_bytecode_disassembly_generator(self): + codegen_stream = StringIO.StringIO(D8_CODEGEN_OUTPUT) + disassembly = list( + bytecode_annotate.bytecode_disassembly_generator(codegen_stream, "bar")) + self.assertListEqual(disassembly, [ + 
"0x3101394b3c0 0 55 push rbp", + "0x3101394b3c1 1 4883c428 REX.W addq rsp,0x28", + "0x3101394b3c5 5 ffe3 jmp rbx"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/deps/v8/tools/ignition/linux_perf_report.py b/deps/v8/tools/ignition/linux_perf_report.py new file mode 100755 index 00000000000000..eaf85b3f91efd2 --- /dev/null +++ b/deps/v8/tools/ignition/linux_perf_report.py @@ -0,0 +1,223 @@ +#! /usr/bin/python2 +# +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# + +import argparse +import collections +import re +import subprocess +import sys + + +__DESCRIPTION = """ +Processes a perf.data sample file and reports the hottest Ignition bytecodes, +or write an input file for flamegraph.pl. +""" + + +__HELP_EPILOGUE = """ +examples: + # Get a flamegraph for Ignition bytecode handlers on Octane benchmark, + # without considering the time spent compiling JS code, entry trampoline + # samples and other non-Ignition samples. + # + $ tools/run-perf.sh out/x64.release/d8 \\ + --ignition --noturbo --nocrankshaft run.js + $ tools/ignition/linux_perf_report.py --flamegraph -o out.collapsed + $ flamegraph.pl --colors js out.collapsed > out.svg + + # Same as above, but show all samples, including time spent compiling JS code, + # entry trampoline samples and other samples. + $ # ... + $ tools/ignition/linux_perf_report.py \\ + --flamegraph --show-all -o out.collapsed + $ # ... + + # Same as above, but show full function signatures in the flamegraph. + $ # ... + $ tools/ignition/linux_perf_report.py \\ + --flamegraph --show-full-signatures -o out.collapsed + $ # ... + + # See the hottest bytecodes on Octane benchmark, by number of samples. 
+ # + $ tools/run-perf.sh out/x64.release/d8 \\ + --ignition --noturbo --nocrankshaft octane/run.js + $ tools/ignition/linux_perf_report.py +""" + + +COMPILER_SYMBOLS_RE = re.compile( + r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser") + + +def strip_function_parameters(symbol): + if symbol[-1] != ')': return symbol + pos = 1 + parenthesis_count = 0 + for c in reversed(symbol): + if c == ')': + parenthesis_count += 1 + elif c == '(': + parenthesis_count -= 1 + if parenthesis_count == 0: + break + else: + pos += 1 + return symbol[:-pos] + + +def collapsed_callchains_generator(perf_stream, show_all=False, + show_full_signatures=False): + current_chain = [] + skip_until_end_of_chain = False + compiler_symbol_in_chain = False + + for line in perf_stream: + # Lines starting with a "#" are comments, skip them. + if line[0] == "#": + continue + + line = line.strip() + + # Empty line signals the end of the callchain. + if not line: + if not skip_until_end_of_chain and current_chain and show_all: + current_chain.append("[other]") + yield current_chain + # Reset parser status. + current_chain = [] + skip_until_end_of_chain = False + compiler_symbol_in_chain = False + continue + + if skip_until_end_of_chain: + continue + + # Trim the leading address and the trailing +offset, if present. 
+ symbol = line.split(" ", 1)[1].split("+", 1)[0] + if not show_full_signatures: + symbol = strip_function_parameters(symbol) + current_chain.append(symbol) + + if symbol.startswith("BytecodeHandler:"): + yield current_chain + skip_until_end_of_chain = True + elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain: + if show_all: + current_chain[-1] = "[compiler]" + yield current_chain + skip_until_end_of_chain = True + elif COMPILER_SYMBOLS_RE.match(symbol): + compiler_symbol_in_chain = True + elif symbol == "Builtin:InterpreterEntryTrampoline": + if len(current_chain) == 1: + yield ["[entry trampoline]"] + else: + # If we see an InterpreterEntryTrampoline which is not at the top of the + # chain and doesn't have a BytecodeHandler above it, then we have + # skipped the top BytecodeHandler due to the top-level stub not building + # a frame. File the chain in the [misattributed] bucket. + current_chain[-1] = "[misattributed]" + yield current_chain + skip_until_end_of_chain = True + + +def calculate_samples_count_per_callchain(callchains): + chain_counters = collections.defaultdict(int) + for callchain in callchains: + key = ";".join(reversed(callchain)) + chain_counters[key] += 1 + return chain_counters.items() + + +def calculate_samples_count_per_handler(callchains): + def strip_handler_prefix_if_any(handler): + return handler if handler[0] == "[" else handler.split(":", 1)[1] + + handler_counters = collections.defaultdict(int) + for callchain in callchains: + handler = strip_handler_prefix_if_any(callchain[-1]) + handler_counters[handler] += 1 + return handler_counters.items() + + +def write_flamegraph_input_file(output_stream, callchains): + for callchain, count in calculate_samples_count_per_callchain(callchains): + output_stream.write("{}; {}\n".format(callchain, count)) + + +def write_handlers_report(output_stream, callchains): + handler_counters = calculate_samples_count_per_handler(callchains) + samples_num = sum(counter for _, counter in 
handler_counters) + # Sort by decreasing number of samples + handler_counters.sort(key=lambda entry: entry[1], reverse=True) + for bytecode_name, count in handler_counters: + output_stream.write( + "{}\t{}\t{:.3f}%\n".format(bytecode_name, count, + 100. * count / samples_num)) + + +def parse_command_line(): + command_line_parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__DESCRIPTION, + epilog=__HELP_EPILOGUE) + + command_line_parser.add_argument( + "perf_filename", + help="perf sample file to process (default: perf.data)", + nargs="?", + default="perf.data", + metavar="" + ) + command_line_parser.add_argument( + "--flamegraph", "-f", + help="output an input file for flamegraph.pl, not a report", + action="store_true", + dest="output_flamegraph" + ) + command_line_parser.add_argument( + "--show-all", "-a", + help="show samples outside Ignition bytecode handlers", + action="store_true" + ) + command_line_parser.add_argument( + "--show-full-signatures", "-s", + help="show full signatures instead of function names", + action="store_true" + ) + command_line_parser.add_argument( + "--output", "-o", + help="output file name (stdout if omitted)", + type=argparse.FileType('wt'), + default=sys.stdout, + metavar="", + dest="output_stream" + ) + + return command_line_parser.parse_args() + + +def main(): + program_options = parse_command_line() + + perf = subprocess.Popen(["perf", "script", "--fields", "ip,sym", + "-i", program_options.perf_filename], + stdout=subprocess.PIPE) + + callchains = collapsed_callchains_generator( + perf.stdout, program_options.show_all, + program_options.show_full_signatures) + + if program_options.output_flamegraph: + write_flamegraph_input_file(program_options.output_stream, callchains) + else: + write_handlers_report(program_options.output_stream, callchains) + + +if __name__ == "__main__": + main() diff --git a/deps/v8/tools/ignition/linux_perf_report_test.py 
b/deps/v8/tools/ignition/linux_perf_report_test.py new file mode 100644 index 00000000000000..d9cef75dfff275 --- /dev/null +++ b/deps/v8/tools/ignition/linux_perf_report_test.py @@ -0,0 +1,147 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import linux_perf_report as ipr +import StringIO +import unittest + + +PERF_SCRIPT_OUTPUT = """ +# This line is a comment +# This should be ignored too +# +# cdefab01 aRandomSymbol::Name(to, be, ignored) + + 00000000 firstSymbol + 00000123 secondSymbol + + 01234567 foo + abcdef76 BytecodeHandler:bar + 76543210 baz + +# Indentation shouldn't matter (neither should this line) + + 01234567 foo + abcdef76 BytecodeHandler:bar + 76543210 baz + + 01234567 beep + abcdef76 BytecodeHandler:bar + 76543210 baz + + 01234567 hello + abcdef76 v8::internal::Compiler + 00000000 Stub:CEntryStub + 76543210 world + 11111111 BytecodeHandler:nope + + 00000000 Lost + 11111111 Builtin:InterpreterEntryTrampoline + 22222222 bar + + 11111111 Builtin:InterpreterEntryTrampoline + 22222222 bar +""" + + +class LinuxPerfReportTest(unittest.TestCase): + def test_collapsed_callchains_generator(self): + perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT) + callchains = list(ipr.collapsed_callchains_generator(perf_stream)) + self.assertListEqual(callchains, [ + ["foo", "BytecodeHandler:bar"], + ["foo", "BytecodeHandler:bar"], + ["beep", "BytecodeHandler:bar"], + ["[entry trampoline]"], + ]) + + def test_collapsed_callchains_generator_show_other(self): + perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT) + callchains = list(ipr.collapsed_callchains_generator(perf_stream, + show_all=True)) + self.assertListEqual(callchains, [ + ['firstSymbol', 'secondSymbol', '[other]'], + ["foo", "BytecodeHandler:bar"], + ["foo", "BytecodeHandler:bar"], + ["beep", "BytecodeHandler:bar"], + ["hello", "v8::internal::Compiler", "[compiler]"], + ["Lost", 
"[misattributed]"], + ["[entry trampoline]"], + ]) + + def test_calculate_samples_count_per_callchain(self): + counters = ipr.calculate_samples_count_per_callchain([ + ["foo", "BytecodeHandler:bar"], + ["foo", "BytecodeHandler:bar"], + ["beep", "BytecodeHandler:bar"], + ["hello", "v8::internal::Compiler", "[compiler]"], + ]) + self.assertItemsEqual(counters, [ + ('BytecodeHandler:bar;foo', 2), + ('BytecodeHandler:bar;beep', 1), + ('[compiler];v8::internal::Compiler;hello', 1), + ]) + + def test_calculate_samples_count_per_callchain(self): + counters = ipr.calculate_samples_count_per_callchain([ + ["foo", "BytecodeHandler:bar"], + ["foo", "BytecodeHandler:bar"], + ["beep", "BytecodeHandler:bar"], + ]) + self.assertItemsEqual(counters, [ + ('BytecodeHandler:bar;foo', 2), + ('BytecodeHandler:bar;beep', 1), + ]) + + def test_calculate_samples_count_per_handler_show_compile(self): + counters = ipr.calculate_samples_count_per_handler([ + ["foo", "BytecodeHandler:bar"], + ["foo", "BytecodeHandler:bar"], + ["beep", "BytecodeHandler:bar"], + ["hello", "v8::internal::Compiler", "[compiler]"], + ]) + self.assertItemsEqual(counters, [ + ("bar", 3), + ("[compiler]", 1) + ]) + + def test_calculate_samples_count_per_handler_(self): + counters = ipr.calculate_samples_count_per_handler([ + ["foo", "BytecodeHandler:bar"], + ["foo", "BytecodeHandler:bar"], + ["beep", "BytecodeHandler:bar"], + ]) + self.assertItemsEqual(counters, [("bar", 3)]) + + def test_multiple_handlers(self): + perf_stream = StringIO.StringIO(""" + 0000 foo(bar) + 1234 BytecodeHandler:first + 5678 a::random::call(something, else) + 9abc BytecodeHandler:second + def0 otherIrrelevant(stuff) + 1111 entrypoint + """) + callchains = list(ipr.collapsed_callchains_generator(perf_stream, False)) + self.assertListEqual(callchains, [ + ["foo", "BytecodeHandler:first"], + ]) + + def test_compiler_symbols_regex(self): + compiler_symbols = [ + "v8::internal::Parser", + "v8::internal::(anonymous namespace)::Compile", + 
"v8::internal::Compiler::foo", + ] + for compiler_symbol in compiler_symbols: + self.assertTrue(ipr.COMPILER_SYMBOLS_RE.match(compiler_symbol)) + + def test_strip_function_parameters(self): + def should_match(signature, name): + self.assertEqual(ipr.strip_function_parameters(signature), name) + + should_match("foo(bar)", "foo"), + should_match("Foo(foomatic::(anonymous)::bar(baz))", "Foo"), + should_match("v8::(anonymous ns)::bar(baz, poe)", + "v8::(anonymous ns)::bar") diff --git a/deps/v8/tools/isolate_driver.py b/deps/v8/tools/isolate_driver.py index d1b39b095828dc..a6bcfbf71f9ea0 100644 --- a/deps/v8/tools/isolate_driver.py +++ b/deps/v8/tools/isolate_driver.py @@ -1,21 +1,276 @@ #!/usr/bin/env python # Copyright 2015 the V8 project authors. All rights reserved. +# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Adaptor script called through build/isolate.gypi. -Slimmed down version of chromium's isolate driver that doesn't process dynamic -dependencies. +Creates a wrapping .isolate which 'includes' the original one, that can be +consumed by tools/swarming_client/isolate.py. Path variables are determined +based on the current working directory. The relative_cwd in the .isolated file +is determined based on the .isolate file that declare the 'command' variable to +be used so the wrapping .isolate doesn't affect this value. + +This script loads build.ninja and processes it to determine all the executables +referenced by the isolated target. It adds them in the wrapping .isolate file. + +WARNING: The target to use for build.ninja analysis is the base name of the +.isolate file plus '_run'. For example, 'foo_test.isolate' would have the target +'foo_test_run' analysed. 
""" +import errno +import glob import json import logging import os +import posixpath +import StringIO import subprocess import sys +import time TOOLS_DIR = os.path.dirname(os.path.abspath(__file__)) +SWARMING_CLIENT_DIR = os.path.join(TOOLS_DIR, 'swarming_client') +SRC_DIR = os.path.dirname(TOOLS_DIR) + +sys.path.insert(0, SWARMING_CLIENT_DIR) + +import isolate_format + + +def load_ninja_recursively(build_dir, ninja_path, build_steps): + """Crudely extracts all the subninja and build referenced in ninja_path. + + In particular, it ignores rule and variable declarations. The goal is to be + performant (well, as much as python can be performant) which is currently in + the <200ms range for a complete chromium tree. As such the code is laid out + for performance instead of readability. + """ + logging.debug('Loading %s', ninja_path) + try: + with open(os.path.join(build_dir, ninja_path), 'rb') as f: + line = None + merge_line = '' + subninja = [] + for line in f: + line = line.rstrip() + if not line: + continue + + if line[-1] == '$': + # The next line needs to be merged in. + merge_line += line[:-1] + continue + + if merge_line: + line = merge_line + line + merge_line = '' + + statement = line[:line.find(' ')] + if statement == 'build': + # Save the dependency list as a raw string. Only the lines needed will + # be processed with raw_build_to_deps(). This saves a good 70ms of + # processing time. + build_target, dependencies = line[6:].split(': ', 1) + # Interestingly, trying to be smart and only saving the build steps + # with the intended extensions ('', '.stamp', '.so') slows down + # parsing even if 90% of the build rules can be skipped. + # On Windows, a single step may generate two target, so split items + # accordingly. It has only been seen for .exe/.exe.pdb combos. 
+ for i in build_target.strip().split(): + build_steps[i] = dependencies + elif statement == 'subninja': + subninja.append(line[9:]) + except IOError: + print >> sys.stderr, 'Failed to open %s' % ninja_path + raise + + total = 1 + for rel_path in subninja: + try: + # Load each of the files referenced. + # TODO(maruel): Skip the files known to not be needed. It saves an aweful + # lot of processing time. + total += load_ninja_recursively(build_dir, rel_path, build_steps) + except IOError: + print >> sys.stderr, '... as referenced by %s' % ninja_path + raise + return total + + +def load_ninja(build_dir): + """Loads the tree of .ninja files in build_dir.""" + build_steps = {} + total = load_ninja_recursively(build_dir, 'build.ninja', build_steps) + logging.info('Loaded %d ninja files, %d build steps', total, len(build_steps)) + return build_steps + + +def using_blacklist(item): + """Returns True if an item should be analyzed. + + Ignores many rules that are assumed to not depend on a dynamic library. If + the assumption doesn't hold true anymore for a file format, remove it from + this list. This is simply an optimization. + """ + # *.json is ignored below, *.isolated.gen.json is an exception, it is produced + # by isolate_driver.py in 'test_isolation_mode==prepare'. + if item.endswith('.isolated.gen.json'): + return True + IGNORED = ( + '.a', '.cc', '.css', '.dat', '.def', '.frag', '.h', '.html', '.isolate', + '.js', '.json', '.manifest', '.o', '.obj', '.pak', '.png', '.pdb', '.py', + '.strings', '.test', '.txt', '.vert', + ) + # ninja files use native path format. + ext = os.path.splitext(item)[1] + if ext in IGNORED: + return False + # Special case Windows, keep .dll.lib but discard .lib. + if item.endswith('.dll.lib'): + return True + if ext == '.lib': + return False + return item not in ('', '|', '||') + + +def raw_build_to_deps(item): + """Converts a raw ninja build statement into the list of interesting + dependencies. 
+ """ + # TODO(maruel): Use a whitelist instead? .stamp, .so.TOC, .dylib.TOC, + # .dll.lib, .exe and empty. + # The first item is the build rule, e.g. 'link', 'cxx', 'phony', etc. + return filter(using_blacklist, item.split(' ')[1:]) + + +def collect_deps(target, build_steps, dependencies_added, rules_seen): + """Recursively adds all the interesting dependencies for |target| + into |dependencies_added|. + """ + if rules_seen is None: + rules_seen = set() + if target in rules_seen: + # TODO(maruel): Figure out how it happens. + logging.warning('Circular dependency for %s!', target) + return + rules_seen.add(target) + try: + dependencies = raw_build_to_deps(build_steps[target]) + except KeyError: + logging.info('Failed to find a build step to generate: %s', target) + return + logging.debug('collect_deps(%s) -> %s', target, dependencies) + for dependency in dependencies: + dependencies_added.add(dependency) + collect_deps(dependency, build_steps, dependencies_added, rules_seen) + + +def post_process_deps(build_dir, dependencies): + """Processes the dependency list with OS specific rules.""" + def filter_item(i): + if i.endswith('.so.TOC'): + # Remove only the suffix .TOC, not the .so! + return i[:-4] + if i.endswith('.dylib.TOC'): + # Remove only the suffix .TOC, not the .dylib! + return i[:-4] + if i.endswith('.dll.lib'): + # Remove only the suffix .lib, not the .dll! + return i[:-4] + return i + + def is_exe(i): + # This script is only for adding new binaries that are created as part of + # the component build. + ext = os.path.splitext(i)[1] + # On POSIX, executables have no extension. + if ext not in ('', '.dll', '.dylib', '.exe', '.nexe', '.so'): + return False + if os.path.isabs(i): + # In some rare case, there's dependency set explicitly on files outside + # the checkout. + return False + + # Check for execute access and strip directories. This gets rid of all the + # phony rules. 
+ p = os.path.join(build_dir, i) + return os.access(p, os.X_OK) and not os.path.isdir(p) + + return filter(is_exe, map(filter_item, dependencies)) + + +def create_wrapper(args, isolate_index, isolated_index): + """Creates a wrapper .isolate that add dynamic libs. + + The original .isolate is not modified. + """ + cwd = os.getcwd() + isolate = args[isolate_index] + # The code assumes the .isolate file is always specified path-less in cwd. Fix + # if this assumption doesn't hold true. + assert os.path.basename(isolate) == isolate, isolate + + # This will look like ../out/Debug. This is based against cwd. Note that this + # must equal the value provided as PRODUCT_DIR. + build_dir = os.path.dirname(args[isolated_index]) + + # This will look like chrome/unit_tests.isolate. It is based against SRC_DIR. + # It's used to calculate temp_isolate. + src_isolate = os.path.relpath(os.path.join(cwd, isolate), SRC_DIR) + + # The wrapping .isolate. This will look like + # ../out/Debug/gen/chrome/unit_tests.isolate. + temp_isolate = os.path.join(build_dir, 'gen', src_isolate) + temp_isolate_dir = os.path.dirname(temp_isolate) + + # Relative path between the new and old .isolate file. + isolate_relpath = os.path.relpath( + '.', temp_isolate_dir).replace(os.path.sep, '/') + + # It's a big assumption here that the name of the isolate file matches the + # primary target '_run'. Fix accordingly if this doesn't hold true, e.g. + # complain to maruel@. + target = isolate[:-len('.isolate')] + '_run' + build_steps = load_ninja(build_dir) + binary_deps = set() + collect_deps(target, build_steps, binary_deps, None) + binary_deps = post_process_deps(build_dir, binary_deps) + logging.debug( + 'Binary dependencies:%s', ''.join('\n ' + i for i in binary_deps)) + + # Now do actual wrapping .isolate. + isolate_dict = { + 'includes': [ + posixpath.join(isolate_relpath, isolate), + ], + 'variables': { + # Will look like ['<(PRODUCT_DIR)/lib/flibuser_prefs.so']. 
+ 'files': sorted( + '<(PRODUCT_DIR)/%s' % i.replace(os.path.sep, '/') + for i in binary_deps), + }, + } + # Some .isolate files have the same temp directory and the build system may + # run this script in parallel so make directories safely here. + try: + os.makedirs(temp_isolate_dir) + except OSError as e: + if e.errno != errno.EEXIST: + raise + comment = ( + '# Warning: this file was AUTOGENERATED.\n' + '# DO NO EDIT.\n') + out = StringIO.StringIO() + isolate_format.print_all(comment, isolate_dict, out) + isolate_content = out.getvalue() + with open(temp_isolate, 'wb') as f: + f.write(isolate_content) + logging.info('Added %d dynamic libs', len(binary_deps)) + logging.debug('%s', isolate_content) + args[isolate_index] = temp_isolate def prepare_isolate_call(args, output): @@ -31,13 +286,22 @@ def prepare_isolate_call(args, output): }, f, indent=2, sort_keys=True) +def rebase_directories(args, abs_base): + """Rebases all paths to be relative to abs_base.""" + def replace(index): + args[index] = os.path.relpath(os.path.abspath(args[index]), abs_base) + for i, arg in enumerate(args): + if arg in ['--isolate', '--isolated']: + replace(i + 1) + if arg == '--path-variable': + # Path variables have a triple form: --path-variable NAME . + replace(i + 2) + + def main(): logging.basicConfig(level=logging.ERROR, format='%(levelname)7s %(message)s') - if len(sys.argv) < 2: - print >> sys.stderr, 'Internal failure; mode required' - return 1 - mode = sys.argv[1] args = sys.argv[1:] + mode = args[0] if args else None isolate = None isolated = None for i, arg in enumerate(args): @@ -45,20 +309,31 @@ def main(): isolate = i + 1 if arg == '--isolated': isolated = i + 1 - if not isolate or not isolated: + if isolate is None or isolated is None or not mode: print >> sys.stderr, 'Internal failure' return 1 + # Make sure all paths are relative to the isolate file. This is an + # expectation of the go binaries. 
In gn, this script is not called + # relative to the isolate file, but relative to the product dir. + new_base = os.path.abspath(os.path.dirname(args[isolate])) + rebase_directories(args, new_base) + assert args[isolate] == os.path.basename(args[isolate]) + os.chdir(new_base) + + create_wrapper(args, isolate, isolated) + # In 'prepare' mode just collect all required information for postponed # isolated.py invocation later, store it in *.isolated.gen.json file. if mode == 'prepare': prepare_isolate_call(args[1:], args[isolated] + '.gen.json') return 0 - swarming_client = os.path.join(TOOLS_DIR, 'swarming_client') + swarming_client = os.path.join(SRC_DIR, 'tools', 'swarming_client') sys.stdout.flush() - return subprocess.call( + result = subprocess.call( [sys.executable, os.path.join(swarming_client, 'isolate.py')] + args) + return result if __name__ == '__main__': diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py index d91513311429a8..b676d662e773e2 100755 --- a/deps/v8/tools/js2c.py +++ b/deps/v8/tools/js2c.py @@ -145,10 +145,12 @@ def __init__(self, args, body): self.args = args self.body = body def expand(self, mapping): - result = self.body - for key, value in mapping.items(): - result = result.replace(key, value) - return result + # Keys could be substrings of earlier values. To avoid unintended + # clobbering, apply all replacements simultaneously. 
+ any_key_pattern = "|".join(re.escape(k) for k in mapping.iterkeys()) + def replace(match): + return mapping[match.group(0)] + return re.sub(any_key_pattern, replace, self.body) class PythonMacro: def __init__(self, args, fun): diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp index fb0e5f4949c1b3..8938e44538b3d5 100644 --- a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp +++ b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp @@ -13,8 +13,8 @@ '../../src/d8.gyp:d8_run', ], 'includes': [ - '../../build/features.gypi', - '../../build/isolate.gypi', + '../../gypfiles/features.gypi', + '../../gypfiles/isolate.gypi', ], 'sources': [ 'jsfunfuzz.isolate', diff --git a/deps/v8/tools/mb/OWNERS b/deps/v8/tools/mb/OWNERS new file mode 100644 index 00000000000000..de5efcb8dc7eac --- /dev/null +++ b/deps/v8/tools/mb/OWNERS @@ -0,0 +1,3 @@ +brettw@chromium.org +dpranke@chromium.org +machenbach@chromium.org diff --git a/deps/v8/tools/mb/PRESUBMIT.py b/deps/v8/tools/mb/PRESUBMIT.py new file mode 100644 index 00000000000000..6f5307c63eb3c0 --- /dev/null +++ b/deps/v8/tools/mb/PRESUBMIT.py @@ -0,0 +1,41 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +def _CommonChecks(input_api, output_api): + results = [] + + # Run Pylint over the files in the directory. + pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api) + results.extend(input_api.RunTests(pylint_checks)) + + # Run the MB unittests. + results.extend(input_api.canned_checks.RunUnitTestsInDirectory( + input_api, output_api, '.', [ r'^.+_unittest\.py$'])) + + # Validate the format of the mb_config.pyl file. 
+ cmd = [input_api.python_executable, 'mb.py', 'validate'] + kwargs = {'cwd': input_api.PresubmitLocalPath()} + results.extend(input_api.RunTests([ + input_api.Command(name='mb_validate', + cmd=cmd, kwargs=kwargs, + message=output_api.PresubmitError)])) + + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=80, + source_file_filter=lambda x: 'mb_config.pyl' in x.LocalPath())) + + return results + + +def CheckChangeOnUpload(input_api, output_api): + return _CommonChecks(input_api, output_api) + + +def CheckChangeOnCommit(input_api, output_api): + return _CommonChecks(input_api, output_api) diff --git a/deps/v8/tools/mb/README.md b/deps/v8/tools/mb/README.md new file mode 100644 index 00000000000000..4e73a8e9fc762a --- /dev/null +++ b/deps/v8/tools/mb/README.md @@ -0,0 +1,22 @@ +# MB - The Meta-Build wrapper + +MB is a simple wrapper intended to provide a uniform interface to either +GYP or GN, such that users and bots can call one script and not need to +worry about whether a given bot is meant to use GN or GYP. + +It supports two main functions: + +1. "gen" - the main `gyp_chromium` / `gn gen` invocation that generates the + Ninja files needed for the build. + +2. "analyze" - the step that takes a list of modified files and a list of + desired targets and reports which targets will need to be rebuilt. + +We also use MB as a forcing function to collect all of the different +build configurations that we actually support for Chromium builds into +one place, in `//tools/mb/mb_config.pyl`. 
+ +For more information, see: + +* [The User Guide](docs/user_guide.md) +* [The Design Spec](docs/design_spec.md) diff --git a/deps/v8/tools/mb/docs/README.md b/deps/v8/tools/mb/docs/README.md new file mode 100644 index 00000000000000..f29007d9ede41f --- /dev/null +++ b/deps/v8/tools/mb/docs/README.md @@ -0,0 +1,4 @@ +# The MB (Meta-Build wrapper) documentation + +* The [User Guide](user_guide.md) +* The [Design Spec](design_spec.md) diff --git a/deps/v8/tools/mb/docs/design_spec.md b/deps/v8/tools/mb/docs/design_spec.md new file mode 100644 index 00000000000000..33fda806e8abc8 --- /dev/null +++ b/deps/v8/tools/mb/docs/design_spec.md @@ -0,0 +1,426 @@ +# The MB (Meta-Build wrapper) design spec + +[TOC] + +## Intro + +MB is intended to address two major aspects of the GYP -> GN transition +for Chromium: + +1. "bot toggling" - make it so that we can easily flip a given bot + back and forth between GN and GYP. + +2. "bot configuration" - provide a single source of truth for all of + the different configurations (os/arch/`gyp_define` combinations) of + Chromium that are supported. + +MB must handle at least the `gen` and `analyze` steps on the bots, i.e., +we need to wrap both the `gyp_chromium` invocation to generate the +Ninja files, and the `analyze` step that takes a list of modified files +and a list of targets to build and returns which targets are affected by +the files. + +For more information on how to actually use MB, see +[the user guide](user_guide.md). + +## Design + +MB is intended to be as simple as possible, and to defer as much work as +possible to GN or GYP. It should live as a very simple Python wrapper +that offers little in the way of surprises. 
+ +### Command line + +It is structured as a single binary that supports a list of subcommands: + +* `mb gen -c linux_rel_bot //out/Release` +* `mb analyze -m tryserver.chromium.linux -b linux_rel /tmp/input.json /tmp/output.json` + +### Configurations + +`mb` will first look for a bot config file in a set of different locations +(initially just in //ios/build/bots). Bot config files are JSON files that +contain keys for 'GYP_DEFINES' (a list of strings that will be joined together +with spaces and passed to GYP, or a dict that will be similarly converted), +'gn_args' (a list of strings that will be joined together), and an +'mb_type' field that says whether to use GN or GYP. Bot config files +require the full list of settings to be given explicitly. + +If no matching bot config file is found, `mb` looks in the +`//tools/mb/mb_config.pyl` config file to determine whether to use GYP or GN +for a particular build directory, and what set of flags (`GYP_DEFINES` or `gn +args`) to use. + +A config can either be specified directly (useful for testing) or by specifying +the master name and builder name (useful on the bots so that they do not need +to specify a config directly and can be hidden from the details). + +See the [user guide](user_guide.md#mb_config.pyl) for details. + +### Handling the analyze step + +The interface to `mb analyze` is described in the +[user\_guide](user_guide.md#mb_analyze). + +The way analyze works can be subtle and complicated (see below). + +Since the interface basically mirrors the way the "analyze" step on the bots +invokes `gyp_chromium` today, when the config is found to be a gyp config, +the arguments are passed straight through. + +It implements the equivalent functionality in GN by calling `gn refs +[list of files] --type=executable --all --as=output` and filtering the +output to match the list of targets. 
+ +## Analyze + +The goal of the `analyze` step is to speed up the cycle time of the try servers +by only building and running the tests affected by the files in a patch, rather +than everything that might be out of date. Doing this ends up being tricky. + +We start with the following requirements and observations: + +* In an ideal (un-resource-constrained) world, we would build and test + everything that a patch affected on every patch. This does not + necessarily mean that we would build 'all' on every patch (see below). + +* In the real world, however, we do not have an infinite number of machines, + and try jobs are not infinitely fast, so we need to balance the desire + to get maximum test coverage against the desire to have reasonable cycle + times, given the number of machines we have. + +* Also, since we run most try jobs against tip-of-tree Chromium, by + the time one job completes on the bot, new patches have probably landed, + rendering the build out of date. + +* This means that the next try job may have to do a build that is out of + date due to a combination of files affected by a given patch, and files + affected for unrelated reasons. We want to rebuild and test only the + targets affected by the patch, so that we don't blame or punish the + patch author for unrelated changes. + +So: + +1. We need a way to indicate which changed files we care about and which + we don't (the affected files of a patch). + +2. We need to know which tests we might potentially want to run, and how + those are mapped onto build targets. For some kinds of tests (like + GTest-based tests), the mapping is 1:1 - if you want to run base_unittests, + you need to build base_unittests. For others (like the telemetry and + layout tests), you might need to build several executables in order to + run the tests, and that mapping might best be captured by a *meta* + target (a GN group or a GYP 'none' target like `webkit_tests`) that + depends on the right list of files. 
Because the GN and GYP files know + nothing about test steps, we have to have some way of mapping back + and forth between test steps and build targets. That mapping + is *not* currently available to MB (or GN or GYP), and so we have to + enough information to make it possible for the caller to do the mapping. + +3. We might also want to know when test targets are affected by data files + that aren't compiled (python scripts, or the layout tests themselves). + There's no good way to do this in GYP, but GN supports this. + +4. We also want to ensure that particular targets still compile even if they + are not actually tested; consider testing the installers themselves, or + targets that don't yet have good test coverage. We might want to use meta + targets for this purpose as well. + +5. However, for some meta targets, we don't necessarily want to rebuild the + meta target itself, perhaps just the dependencies of the meta target that + are affected by the patch. For example, if you have a meta target like + `blink_tests` that might depend on ten different test binaries. If a patch + only affects one of them (say `wtf_unittests`), you don't want to + build `blink_tests`, because that might actually also build the other nine + targets. In other words, some meta targets are *prunable*. + +6. As noted above, in the ideal case we actually have enough resources and + things are fast enough that we can afford to build everything affected by a + patch, but listing every possible target explicitly would be painful. The + GYP and GN Ninja generators provide an 'all' target that captures (nearly, + see [crbug.com/503241](crbug.com/503241)) everything, but unfortunately + neither GN nor GYP actually represents 'all' as a meta target in the build + graph, so we will need to write code to handle that specially. + +7. In some cases, we will not be able to correctly analyze the build graph to + determine the impact of a patch, and need to bail out (e.g,. 
if you change a + build file itself, it may not be easy to tell how that affects the graph). + In that case we should simply build and run everything. + +The interaction between 2) and 5) means that we need to treat meta targets +two different ways, and so we need to know which targets should be +pruned in the sense of 5) and which targets should be returned unchanged +so that we can map them back to the appropriate tests. + +So, we need three things as input: + +* `files`: the list of files in the patch +* `test_targets`: the list of ninja targets which, if affected by a patch, + should be reported back so that we can map them back to the appropriate + tests to run. Any meta targets in this list should *not* be pruned. +* `additional_compile_targets`: the list of ninja targets we wish to compile + *in addition to* the list in `test_targets`. Any meta targets + present in this list should be pruned (we don't need to return the + meta targets because they aren't mapped back to tests, and we don't want + to build them because we might build too much). + +We can then return two lists as output: + +* `compile_targets`, which is a list of pruned targets to be + passed to Ninja to build. It is acceptable to replace a list of + pruned targets by a meta target if it turns out that all of the + dependendencies of the target are affected by the patch (i.e., + all ten binaries that blink_tests depends on), but doing so is + not required. +* `test_targets`, which is a list of unpruned targets to be mapped + back to determine which tests to run. + +There may be substantial overlap between the two lists, but there is +no guarantee that one is a subset of the other and the two cannot be +used interchangeably or merged together without losing information and +causing the wrong thing to happen. + +The implementation is responsible for recognizing 'all' as a magic string +and mapping it onto the list of all root nodes in the build graph. 
+ +There may be files listed in the input that don't actually exist in the build +graph: this could be either the result of an error (the file should be in the +build graph, but isn't), or perfectly fine (the file doesn't affect the build +graph at all). We can't tell these two apart, so we should ignore missing +files. + +There may be targets listed in the input that don't exist in the build +graph; unlike missing files, this can only indicate a configuration error, +and so we should return which targets are missing so the caller can +treat this as an error, if so desired. + +Any of the three inputs may be an empty list: + +* It normally doesn't make sense to call analyze at all if no files + were modified, but in rare cases we can hit a race where we try to + test a patch after it has already been committed, in which case + the list of modified files is empty. We should return 'no dependency' + in that case. + +* Passing an empty list for one or the other of test_targets and + additional_compile_targets is perfectly sensible: in the former case, + it can indicate that you don't want to run any tests, and in the latter, + it can indicate that you don't want to do build anything else in + addition to the test targets. + +* It doesn't make sense to call analyze if you don't want to compile + anything at all, so passing [] for both test_targets and + additional_compile_targets should probably return an error. + +In the output case, an empty list indicates that there was nothing to +build, or that there were no affected test targets as appropriate. + +Note that passing no arguments to Ninja is equivalent to passing +`all` to Ninja (at least given how GN and GYP work); however, we +don't want to take advantage of this in most cases because we don't +actually want to build every out of date target, only the targets +potentially affected by the files. 
One could try to indicate +to analyze that we wanted to use no arguments instead of an empty +list, but using the existing fields for this seems fragile and/or +confusing, and adding a new field for this seems unwarranted at this time. + +There is an "error" field in case something goes wrong (like the +empty file list case, above, or an internal error in MB/GYP/GN). The +analyze code should also return an error code to the shell if appropriate +to indicate that the command failed. + +In the case where build files themselves are modified and analyze may +not be able to determine a correct answer (point 7 above, where we return +"Found dependency (all)"), we should also return the `test_targets` unmodified +and return the union of `test_targets` and `additional_compile_targets` for +`compile_targets`, to avoid confusion. + +### Examples + +Continuing the example given above, suppose we have the following build +graph: + +* `blink_tests` is a meta target that depends on `webkit_unit_tests`, + `wtf_unittests`, and `webkit_tests` and represents all of the targets + needed to fully test Blink. Each of those is a separate test step. +* `webkit_tests` is also a meta target; it depends on `content_shell` + and `image_diff`. +* `base_unittests` is a separate test binary. +* `wtf_unittests` depends on `Assertions.cpp` and `AssertionsTest.cpp`. +* `webkit_unit_tests` depends on `WebNode.cpp` and `WebNodeTest.cpp`. +* `content_shell` depends on `WebNode.cpp` and `Assertions.cpp`. +* `base_unittests` depends on `logging.cc` and `logging_unittest.cc`. + +#### Example 1 + +We wish to run 'wtf_unittests' and 'webkit_tests' on a bot, but not +compile any additional targets. 
+ +If a patch touches WebNode.cpp, then analyze gets as input: + + { + "files": ["WebNode.cpp"], + "test_targets": ["wtf_unittests", "webkit_tests"], + "additional_compile_targets": [] + } + +and should return as output: + + { + "status": "Found dependency", + "compile_targets": ["webkit_unit_tests"], + "test_targets": ["webkit_tests"] + } + +Note how `webkit_tests` was pruned in compile_targets but not in test_targets. + +#### Example 2 + +Using the same patch as Example 1, assume we wish to run only `wtf_unittests`, +but additionally build everything needed to test Blink (`blink_tests`): + +We pass as input: + + { + "files": ["WebNode.cpp"], + "test_targets": ["wtf_unittests"], + "additional_compile_targets": ["blink_tests"] + } + +And should get as output: + + { + "status": "Found dependency", + "compile_targets": ["webkit_unit_tests"], + "test_targets": [] + } + +Here `blink_tests` was pruned in the output compile_targets, and +test_targets was empty, since blink_tests was not listed in the input +test_targets. + +#### Example 3 + +Build everything, but do not run any tests. + +Input: + + { + "files": ["WebNode.cpp"], + "test_targets": [], + "additional_compile_targets": ["all"] + } + +Output: + + { + "status": "Found dependency", + "compile_targets": ["webkit_unit_tests", "content_shell"], + "test_targets": [] + } + +#### Example 4 + +Same as Example 2, but a build file was modified instead of a source file. + +Input: + + { + "files": ["BUILD.gn"], + "test_targets": ["wtf_unittests"], + "additional_compile_targets": ["blink_tests"] + } + +Output: + + { + "status": "Found dependency (all)", + "compile_targets": ["webkit_unit_tests", "wtf_unittests"], + "test_targets": ["wtf_unittests"] + } + +test_targets was returned unchanged, compile_targets was pruned. + +## Random Requirements and Rationale + +This section is collection of semi-organized notes on why MB is the way +it is ... 
+ +### in-tree or out-of-tree + +The first issue is whether or not this should exist as a script in +Chromium at all; an alternative would be to simply change the bot +configurations to know whether to use GYP or GN, and which flags to +pass. + +That would certainly work, but experience over the past two years +suggests a few things: + + * we should push as much logic as we can into the source repositories + so that they can be versioned and changed atomically with changes to + the product code; having to coordinate changes between src/ and + build/ is at best annoying and can lead to weird errors. + * the infra team would really like to move to providing + product-independent services (i.e., not have to do one thing for + Chromium, another for NaCl, a third for V8, etc.). + * we found that during the SVN->GIT migration the ability to flip bot + configurations between the two via changes to a file in chromium + was very useful. + +All of this suggests that the interface between bots and Chromium should +be a simple one, hiding as much of the chromium logic as possible. + +### Why not have MB be smarter about de-duping flags? + +This just adds complexity to the MB implementation, and duplicates logic +that GYP and GN already have to support anyway; in particular, it might +require MB to know how to parse GYP and GN values. The belief is that +if MB does *not* do this, it will lead to fewer surprises. + +It will not be hard to change this if need be. + +### Integration w/ gclient runhooks + +On the bots, we will disable `gyp_chromium` as part of runhooks (using +`GYP_CHROMIUM_NO_ACTION=1`), so that mb shows up as a separate step. + +At the moment, we expect most developers to either continue to use +`gyp_chromium` in runhooks or to disable at as above if they have no +use for GYP at all. We may revisit how this works once we encourage more +people to use GN full-time (i.e., we might take `gyp_chromium` out of +runhooks altogether). 
+ +### Config per flag set or config per (os/arch/flag set)? + +Currently, mb_config.pyl does not specify the host_os, target_os, host_cpu, or +target_cpu values for every config that Chromium runs on, it only specifies +them for when the values need to be explicitly set on the command line. + +Instead, we have one config per unique combination of flags only. + +In other words, rather than having `linux_rel_bot`, `win_rel_bot`, and +`mac_rel_bot`, we just have `rel_bot`. + +This design allows us to determine easily all of the different sets +of flags that we need to support, but *not* which flags are used on which +host/target combinations. + +It may be that we should really track the latter. Doing so is just a +config file change, however. + +### Non-goals + +* MB is not intended to replace direct invocation of GN or GYP for + complicated build scenarios (aka ChromeOS), where multiple flags need + to be set to user-defined paths for specific toolchains (e.g., where + ChromeOS needs to specify specific board types and compilers). + +* MB is not intended at this time to be something developers use frequently, + or to add a lot of features to. We hope to be able to get rid of it once + the GYP->GN migration is done, and so we should not add things for + developers that can't easily be added to GN itself. + +* MB is not intended to replace the + [CR tool](https://code.google.com/p/chromium/wiki/CRUserManual). Not + only is it only intended to replace the gyp\_chromium part of `'gclient + runhooks'`, it is not really meant as a developer-facing tool. diff --git a/deps/v8/tools/mb/docs/user_guide.md b/deps/v8/tools/mb/docs/user_guide.md new file mode 100644 index 00000000000000..9817553bf6a41d --- /dev/null +++ b/deps/v8/tools/mb/docs/user_guide.md @@ -0,0 +1,297 @@ +# The MB (Meta-Build wrapper) user guide + +[TOC] + +## Introduction + +`mb` is a simple python wrapper around the GYP and GN meta-build tools to +be used as part of the GYP->GN migration. 
+ +It is intended to be used by bots to make it easier to manage the configuration +each bot builds (i.e., the configurations can be changed from chromium +commits), and to consolidate the list of all of the various configurations +that Chromium is built in. + +Ideally this tool will no longer be needed after the migration is complete. + +For more discussion of MB, see also [the design spec](design_spec.md). + +## MB subcommands + +### `mb analyze` + +`mb analyze` is reponsible for determining what targets are affected by +a list of files (e.g., the list of files in a patch on a trybot): + +``` +mb analyze -c chromium_linux_rel //out/Release input.json output.json +``` + +Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags +must be specified so that `mb` can figure out which config to use. + +The first positional argument must be a GN-style "source-absolute" path +to the build directory. + +The second positional argument is a (normal) path to a JSON file containing +a single object with the following fields: + + * `files`: an array of the modified filenames to check (as paths relative to + the checkout root). + * `test_targets`: an array of (ninja) build targets that needed to run the + tests we wish to run. An empty array will be treated as if there are + no tests that will be run. + * `additional_compile_targets`: an array of (ninja) build targets that + reflect the stuff we might want to build *in addition to* the list + passed in `test_targets`. Targets in this list will be treated + specially, in the following way: if a given target is a "meta" + (GN: group, GYP: none) target like 'blink_tests' or + 'chromium_builder_tests', or even the ninja-specific 'all' target, + then only the *dependencies* of the target that are affected by + the modified files will be rebuilt (not the target itself, which + might also cause unaffected dependencies to be rebuilt). An empty + list will be treated as if there are no additional targets to build. 
+ Empty lists for both `test_targets` and `additional_compile_targets`
+ would cause no work to be done, so will result in an error.
+ * `targets`: a legacy field that resembled a union of `compile_targets`
+ and `test_targets`. Support for this field will be removed once the
+ bots have been updated to use compile_targets and test_targets instead.
+
+The third positional argument is a (normal) path to where mb will write
+the result, also as a JSON object. This object may contain the following
+fields:
+
+ * `error`: this should only be present if something failed.
+ * `compile_targets`: the list of ninja targets that should be passed
+ directly to the corresponding ninja / compile.py invocation. This
+ list may contain entries that are *not* listed in the input (see
+ the description of `additional_compile_targets` above and
+ [the design spec](design_spec.md) for how this works).
+ * `invalid_targets`: a list of any targets that were passed in
+ either of the input lists that weren't actually found in the graph.
+ * `test_targets`: the subset of the input `test_targets` that are
+ potentially out of date, indicating that the matching test steps
+ should be re-run.
+ * `targets`: a legacy field that indicates the subset of the input `targets`
+ that depend on the input `files`.
+ * `build_targets`: a legacy field that indicates the minimal subset of
+ targets needed to build all of `targets` that were affected.
+ * `status`: a field containing one of three strings:
+
+ * `"Found dependency"` (build the `compile_targets`)
+ * `"No dependency"` (i.e., no build needed)
+ * `"Found dependency (all)"` (`test_targets` is returned as-is;
+ `compile_targets` should contain the union of `test_targets` and
+ `additional_compile_targets`. In this case the targets do not
+ need to be pruned).
+
+See [the design spec](design_spec.md) for more details and examples; the
+differences can be subtle. We won't even go into how the `targets` and
+`build_targets` differ from each other or from `compile_targets` and
+`test_targets`.
+
+The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
+`-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`.
+
+### `mb audit`
+
+`mb audit` is used to track the progress of the GYP->GN migration. You can
+use it to check a single master, or all the masters we care about. See
+`mb help audit` for more details (most people are not expected to care about
+this).
+
+### `mb gen`
+
+`mb gen` is responsible for generating the Ninja files by invoking either GYP
+or GN as appropriate. It takes arguments to specify a build config and
+a directory, then runs GYP or GN as appropriate:
+
+```
+% mb gen -m tryserver.chromium.linux -b linux_rel //out/Release
+% mb gen -c linux_rel_trybot //out/Release
+```
+
+Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
+must be specified so that `mb` can figure out which config to use. The
+`--phase` flag must also be used with builders that have multiple
+build/compile steps (and only with those builders).
+
+By default, MB will look for a bot config file under `//ios/build/bots` (see
+[the design spec](design_spec.md) for details of how the bot config files
+work). If no matching one is found, mb will then look in
+`//tools/mb/mb_config.pyl` to look up the config information, but you can
+specify a custom config file using the `-f/--config-file` flag.
+
+The path must be a GN-style "source-absolute" path (as above).
+
+You can pass the `-n/--dryrun` flag to mb gen to see what will happen without
+actually writing anything.
+
+You can pass the `-q/--quiet` flag to get mb to be silent unless there is an
+error, and pass the `-v/--verbose` flag to get mb to log all of the files
+that are read and written, and all the commands that are run.
+ +If the build config will use the Goma distributed-build system, you can pass +the path to your Goma client in the `-g/--goma-dir` flag, and it will be +incorporated into the appropriate flags for GYP or GN as needed. + +If gen ends up using GYP, the path must have a valid GYP configuration as the +last component of the path (i.e., specify `//out/Release_x64`, not `//out`). +The gyp script defaults to `//build/gyp_chromium`, but can be overridden with +the `--gyp-script` flag, e.g. `--gyp-script=gypfiles/gyp_v8`. + +### `mb help` + +Produces help output on the other subcommands + +### `mb lookup` + +Prints what command will be run by `mb gen` (like `mb gen -n` but does +not require you to specify a path). + +The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`, +`--phase`, `-q/--quiet`, and `-v/--verbose` flags work as documented for +`mb gen`. + +### `mb validate` + +Does internal checking to make sure the config file is syntactically +valid and that all of the entries are used properly. It does not validate +that the flags make sense, or that the builder names are legal or +comprehensive, but it does complain about configs and mixins that aren't +used. + +The `-f/--config-file` and `-q/--quiet` flags work as documented for +`mb gen`. + +This is mostly useful as a presubmit check and for verifying changes to +the config file. + +## Isolates and Swarming + +`mb gen` is also responsible for generating the `.isolate` and +`.isolated.gen.json` files needed to run test executables through swarming +in a GN build (in a GYP build, this is done as part of the compile step). + +If you wish to generate the isolate files, pass `mb gen` the +`--swarming-targets-file` command line argument; that arg should be a path +to a file containing a list of ninja build targets to compute the runtime +dependencies for (on Windows, use the ninja target name, not the file, so +`base_unittests`, not `base_unittests.exe`). 
+ +MB will take this file, translate each build target to the matching GN +label (e.g., `base_unittests` -> `//base:base_unittests`, write that list +to a file called `runtime_deps` in the build directory, and pass that to +`gn gen $BUILD ... --runtime-deps-list-file=$BUILD/runtime_deps`. + +Once GN has computed the lists of runtime dependencies, MB will then +look up the command line for each target (currently this is hard-coded +in [mb.py](https://code.google.com/p/chromium/codesearch?q=mb.py#chromium/src/tools/mb/mb.py&q=mb.py%20GetIsolateCommand&sq=package:chromium&type=cs)), and write out the +matching `.isolate` and `.isolated.gen.json` files. + +## The `mb_config.pyl` config file + +The `mb_config.pyl` config file is intended to enumerate all of the +supported build configurations for Chromium. Generally speaking, you +should never need to (or want to) build a configuration that isn't +listed here, and so by using the configs in this file you can avoid +having to juggle long lists of GYP_DEFINES and gn args by hand. + +`mb_config.pyl` is structured as a file containing a single PYthon Literal +expression: a dictionary with three main keys, `masters`, `configs` and +`mixins`. + +The `masters` key contains a nested series of dicts containing mappings +of master -> builder -> config . This allows us to isolate the buildbot +recipes from the actual details of the configs. The config should either +be a single string value representing a key in the `configs` dictionary, +or a list of strings, each of which is a key in the `configs` dictionary; +the latter case is for builders that do multiple compiles with different +arguments in a single build, and must *only* be used for such builders +(where a --phase argument must be supplied in each lookup or gen call). + +The `configs` key points to a dictionary of named build configurations. 
+
+There should be a key in this dict for every supported configuration
+of Chromium, meaning every configuration we have a bot for, and every
+configuration commonly used by developers but that we may not have a bot
+for.
+
+The value of each key is a list of "mixins" that will define what that
+build_config does. Each item in the list must be an entry in the dictionary
+value of the `mixins` key.
+
+Each mixin value is itself a dictionary that contains one or more of the
+following keys:
+
+ * `gyp_crosscompile`: a boolean; if true, GYP_CROSSCOMPILE=1 is set in
+ the environment and passed to GYP.
+ * `gyp_defines`: a string containing a list of GYP_DEFINES.
+ * `gn_args`: a string containing a list of values passed to gn --args.
+ * `mixins`: a list of other mixins that should be included.
+ * `type`: a string with either the value `gyp` or `gn`;
+ setting this indicates which meta-build tool to use.
+
+When `mb gen` or `mb analyze` executes, it takes a config name, looks it
+up in the 'configs' dict, and then does a left-to-right expansion of the
+mixins; gyp_defines and gn_args values are concatenated, and the type values
+override each other.
+
+For example, if you had:
+
+```
+{
+ 'configs': {
+ 'linux_release_trybot': ['gyp_release', 'trybot'],
+ 'gn_shared_debug': None,
+ },
+ 'mixins': {
+ 'bot': {
+ 'gyp_defines': 'use_goma=1 dcheck_always_on=0',
+ 'gn_args': 'use_goma=true dcheck_always_on=false',
+ },
+ 'debug': {
+ 'gn_args': 'is_debug=true',
+ },
+ 'gn': {'type': 'gn'},
+ 'gyp_release': {
+ 'mixins': ['release'],
+ 'type': 'gyp',
+ },
+ 'release': {
+ 'gn_args': 'is_debug=false',
+ },
+ 'shared': {
+ 'gn_args': 'is_component_build=true',
+ 'gyp_defines': 'component=shared_library',
+ },
+ 'trybot': {
+ 'gyp_defines': 'dcheck_always_on=1',
+ 'gn_args': 'dcheck_always_on=true',
+ }
+ }
+}
+```
+
+and you ran `mb gen -c linux_release_trybot //out/Release`, it would
+translate into a call to `gyp_chromium -G Release` with `GYP_DEFINES` set to
+`"use_goma=true dcheck_always_on=false dcheck_always_on=true"`.
+
+(From that you can see that mb is intentionally dumb and does not
+attempt to de-dup the flags, it lets gyp do that).
+
+## Debugging MB
+
+By design, MB should be simple enough that very little can go wrong.
+
+The most obvious issue is that you might see different commands being
+run than you expect; running `'mb -v'` will print what it's doing and
+run the commands; `'mb -n'` will print what it will do but *not* run
+the commands.
+
+If you hit weirder things than that, add some print statements to the
+python script, send a question to gn-dev@chromium.org, or
+[file a bug](https://crbug.com/new) with the label
+'mb' and cc: dpranke@chromium.org.
+
+
diff --git a/deps/v8/tools/mb/mb b/deps/v8/tools/mb/mb
new file mode 100755
index 00000000000000..d3a0cdf019c6d1
--- /dev/null
+++ b/deps/v8/tools/mb/mb
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+ +base_dir=$(dirname "$0") + +PYTHONDONTWRITEBYTECODE=1 exec python "$base_dir/mb.py" "$@" diff --git a/deps/v8/tools/mb/mb.bat b/deps/v8/tools/mb/mb.bat new file mode 100755 index 00000000000000..a82770e714a24c --- /dev/null +++ b/deps/v8/tools/mb/mb.bat @@ -0,0 +1,6 @@ +@echo off +setlocal +:: This is required with cygwin only. +PATH=%~dp0;%PATH% +set PYTHONDONTWRITEBYTECODE=1 +call python "%~dp0mb.py" %* diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py new file mode 100755 index 00000000000000..536dc00fcb16c4 --- /dev/null +++ b/deps/v8/tools/mb/mb.py @@ -0,0 +1,1500 @@ +#!/usr/bin/env python +# Copyright 2016 the V8 project authors. All rights reserved. +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""MB - the Meta-Build wrapper around GYP and GN + +MB is a wrapper script for GYP and GN that can be used to generate build files +for sets of canned configurations and analyze them. 
+""" + +from __future__ import print_function + +import argparse +import ast +import errno +import json +import os +import pipes +import pprint +import re +import shutil +import sys +import subprocess +import tempfile +import traceback +import urllib2 + +from collections import OrderedDict + +CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) +sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path + +import gn_helpers + + +def main(args): + mbw = MetaBuildWrapper() + return mbw.Main(args) + + +class MetaBuildWrapper(object): + def __init__(self): + self.chromium_src_dir = CHROMIUM_SRC_DIR + self.default_config = os.path.join(self.chromium_src_dir, 'infra', 'mb', + 'mb_config.pyl') + self.executable = sys.executable + self.platform = sys.platform + self.sep = os.sep + self.args = argparse.Namespace() + self.configs = {} + self.masters = {} + self.mixins = {} + + def Main(self, args): + self.ParseArgs(args) + try: + ret = self.args.func() + if ret: + self.DumpInputFiles() + return ret + except KeyboardInterrupt: + self.Print('interrupted, exiting', stream=sys.stderr) + return 130 + except Exception: + self.DumpInputFiles() + s = traceback.format_exc() + for l in s.splitlines(): + self.Print(l) + return 1 + + def ParseArgs(self, argv): + def AddCommonOptions(subp): + subp.add_argument('-b', '--builder', + help='builder name to look up config from') + subp.add_argument('-m', '--master', + help='master name to look up config from') + subp.add_argument('-c', '--config', + help='configuration to analyze') + subp.add_argument('--phase', type=int, + help=('build phase for a given build ' + '(int in [1, 2, ...))')) + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file ' + '(default is //tools/mb/mb_config.pyl)') + subp.add_argument('-g', '--goma-dir', + help='path to goma directory') + subp.add_argument('--gyp-script', metavar='PATH', + 
default=self.PathJoin('build', 'gyp_chromium'), + help='path to gyp script relative to project root ' + '(default is %(default)s)') + subp.add_argument('--android-version-code', + help='Sets GN arg android_default_version_code and ' + 'GYP_DEFINE app_manifest_version_code') + subp.add_argument('--android-version-name', + help='Sets GN arg android_default_version_name and ' + 'GYP_DEFINE app_manifest_version_name') + subp.add_argument('-n', '--dryrun', action='store_true', + help='Do a dry run (i.e., do nothing, just print ' + 'the commands that will run)') + subp.add_argument('-v', '--verbose', action='store_true', + help='verbose logging') + + parser = argparse.ArgumentParser(prog='mb') + subps = parser.add_subparsers() + + subp = subps.add_parser('analyze', + help='analyze whether changes to a set of files ' + 'will cause a set of binaries to be rebuilt.') + AddCommonOptions(subp) + subp.add_argument('path', nargs=1, + help='path build was generated into.') + subp.add_argument('input_path', nargs=1, + help='path to a file containing the input arguments ' + 'as a JSON object.') + subp.add_argument('output_path', nargs=1, + help='path to a file containing the output arguments ' + 'as a JSON object.') + subp.set_defaults(func=self.CmdAnalyze) + + subp = subps.add_parser('gen', + help='generate a new set of build files') + AddCommonOptions(subp) + subp.add_argument('--swarming-targets-file', + help='save runtime dependencies for targets listed ' + 'in file.') + subp.add_argument('path', nargs=1, + help='path to generate build into') + subp.set_defaults(func=self.CmdGen) + + subp = subps.add_parser('isolate', + help='generate the .isolate files for a given' + 'binary') + AddCommonOptions(subp) + subp.add_argument('path', nargs=1, + help='path build was generated into') + subp.add_argument('target', nargs=1, + help='ninja target to generate the isolate for') + subp.set_defaults(func=self.CmdIsolate) + + subp = subps.add_parser('lookup', + help='look up the command for 
a given config or ' + 'builder') + AddCommonOptions(subp) + subp.set_defaults(func=self.CmdLookup) + + subp = subps.add_parser( + 'run', + help='build and run the isolated version of a ' + 'binary', + formatter_class=argparse.RawDescriptionHelpFormatter) + subp.description = ( + 'Build, isolate, and run the given binary with the command line\n' + 'listed in the isolate. You may pass extra arguments after the\n' + 'target; use "--" if the extra arguments need to include switches.\n' + '\n' + 'Examples:\n' + '\n' + ' % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n' + ' //out/Default content_browsertests\n' + '\n' + ' % tools/mb/mb.py run out/Default content_browsertests\n' + '\n' + ' % tools/mb/mb.py run out/Default content_browsertests -- \\\n' + ' --test-launcher-retry-limit=0' + '\n' + ) + + AddCommonOptions(subp) + subp.add_argument('-j', '--jobs', dest='jobs', type=int, + help='Number of jobs to pass to ninja') + subp.add_argument('--no-build', dest='build', default=True, + action='store_false', + help='Do not build, just isolate and run') + subp.add_argument('path', nargs=1, + help=('path to generate build into (or use).' + ' This can be either a regular path or a ' + 'GN-style source-relative path like ' + '//out/Default.')) + subp.add_argument('target', nargs=1, + help='ninja target to build and run') + subp.add_argument('extra_args', nargs='*', + help=('extra args to pass to the isolate to run. 
Use ' + '"--" as the first arg if you need to pass ' + 'switches')) + subp.set_defaults(func=self.CmdRun) + + subp = subps.add_parser('validate', + help='validate the config file') + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file ' + '(default is //infra/mb/mb_config.pyl)') + subp.set_defaults(func=self.CmdValidate) + + subp = subps.add_parser('audit', + help='Audit the config file to track progress') + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file ' + '(default is //infra/mb/mb_config.pyl)') + subp.add_argument('-i', '--internal', action='store_true', + help='check internal masters also') + subp.add_argument('-m', '--master', action='append', + help='master to audit (default is all non-internal ' + 'masters in file)') + subp.add_argument('-u', '--url-template', action='store', + default='https://build.chromium.org/p/' + '{master}/json/builders', + help='URL scheme for JSON APIs to buildbot ' + '(default: %(default)s) ') + subp.add_argument('-c', '--check-compile', action='store_true', + help='check whether tbd and master-only bots actually' + ' do compiles') + subp.set_defaults(func=self.CmdAudit) + + subp = subps.add_parser('help', + help='Get help on a subcommand.') + subp.add_argument(nargs='?', action='store', dest='subcommand', + help='The command to get help for.') + subp.set_defaults(func=self.CmdHelp) + + self.args = parser.parse_args(argv) + + def DumpInputFiles(self): + + def DumpContentsOfFilePassedTo(arg_name, path): + if path and self.Exists(path): + self.Print("\n# To recreate the file passed to %s:" % arg_name) + self.Print("%% cat > %s <GN migration on the bots.""" + + # First, make sure the config file is okay, but don't print anything + # if it is (it will throw an error if it isn't). 
+ self.CmdValidate(print_ok=False) + + stats = OrderedDict() + STAT_MASTER_ONLY = 'Master only' + STAT_CONFIG_ONLY = 'Config only' + STAT_TBD = 'Still TBD' + STAT_GYP = 'Still GYP' + STAT_DONE = 'Done (on GN)' + stats[STAT_MASTER_ONLY] = 0 + stats[STAT_CONFIG_ONLY] = 0 + stats[STAT_TBD] = 0 + stats[STAT_GYP] = 0 + stats[STAT_DONE] = 0 + + def PrintBuilders(heading, builders, notes): + stats.setdefault(heading, 0) + stats[heading] += len(builders) + if builders: + self.Print(' %s:' % heading) + for builder in sorted(builders): + self.Print(' %s%s' % (builder, notes[builder])) + + self.ReadConfigFile() + + masters = self.args.master or self.masters + for master in sorted(masters): + url = self.args.url_template.replace('{master}', master) + + self.Print('Auditing %s' % master) + + MASTERS_TO_SKIP = ( + 'client.skia', + 'client.v8.fyi', + 'tryserver.v8', + ) + if master in MASTERS_TO_SKIP: + # Skip these bots because converting them is the responsibility of + # those teams and out of scope for the Chromium migration to GN. + self.Print(' Skipped (out of scope)') + self.Print('') + continue + + INTERNAL_MASTERS = ('official.desktop', 'official.desktop.continuous', + 'internal.client.kitchensync') + if master in INTERNAL_MASTERS and not self.args.internal: + # Skip these because the servers aren't accessible by default ... + self.Print(' Skipped (internal)') + self.Print('') + continue + + try: + # Fetch the /builders contents from the buildbot master. The + # keys of the dict are the builder names themselves. 
+ json_contents = self.Fetch(url) + d = json.loads(json_contents) + except Exception as e: + self.Print(str(e)) + return 1 + + config_builders = set(self.masters[master]) + master_builders = set(d.keys()) + both = master_builders & config_builders + master_only = master_builders - config_builders + config_only = config_builders - master_builders + tbd = set() + gyp = set() + done = set() + notes = {builder: '' for builder in config_builders | master_builders} + + for builder in both: + config = self.masters[master][builder] + if config == 'tbd': + tbd.add(builder) + elif isinstance(config, list): + vals = self.FlattenConfig(config[0]) + if vals['type'] == 'gyp': + gyp.add(builder) + else: + done.add(builder) + elif config.startswith('//'): + done.add(builder) + else: + vals = self.FlattenConfig(config) + if vals['type'] == 'gyp': + gyp.add(builder) + else: + done.add(builder) + + if self.args.check_compile and (tbd or master_only): + either = tbd | master_only + for builder in either: + notes[builder] = ' (' + self.CheckCompile(master, builder) +')' + + if master_only or config_only or tbd or gyp: + PrintBuilders(STAT_MASTER_ONLY, master_only, notes) + PrintBuilders(STAT_CONFIG_ONLY, config_only, notes) + PrintBuilders(STAT_TBD, tbd, notes) + PrintBuilders(STAT_GYP, gyp, notes) + else: + self.Print(' All GN!') + + stats[STAT_DONE] += len(done) + + self.Print('') + + fmt = '{:<27} {:>4}' + self.Print(fmt.format('Totals', str(sum(int(v) for v in stats.values())))) + self.Print(fmt.format('-' * 27, '----')) + for stat, count in stats.items(): + self.Print(fmt.format(stat, str(count))) + + return 0 + + def GetConfig(self): + build_dir = self.args.path[0] + + vals = {} + if self.args.builder or self.args.master or self.args.config: + vals = self.Lookup() + if vals['type'] == 'gn': + # Re-run gn gen in order to ensure the config is consistent with the + # build dir. 
+ self.RunGNGen(vals) + return vals + + mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type') + if not self.Exists(mb_type_path): + toolchain_path = self.PathJoin(self.ToAbsPath(build_dir), + 'toolchain.ninja') + if not self.Exists(toolchain_path): + self.Print('Must either specify a path to an existing GN build dir ' + 'or pass in a -m/-b pair or a -c flag to specify the ' + 'configuration') + return {} + else: + mb_type = 'gn' + else: + mb_type = self.ReadFile(mb_type_path).strip() + + if mb_type == 'gn': + vals = self.GNValsFromDir(build_dir) + else: + vals = {} + vals['type'] = mb_type + + return vals + + def GNValsFromDir(self, build_dir): + args_contents = "" + gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn') + if self.Exists(gn_args_path): + args_contents = self.ReadFile(gn_args_path) + gn_args = [] + for l in args_contents.splitlines(): + fields = l.split(' ') + name = fields[0] + val = ' '.join(fields[2:]) + gn_args.append('%s=%s' % (name, val)) + + return { + 'gn_args': ' '.join(gn_args), + 'type': 'gn', + } + + def Lookup(self): + vals = self.ReadBotConfig() + if not vals: + self.ReadConfigFile() + config = self.ConfigFromArgs() + if config.startswith('//'): + if not self.Exists(self.ToAbsPath(config)): + raise MBErr('args file "%s" not found' % config) + vals = { + 'args_file': config, + 'cros_passthrough': False, + 'gn_args': '', + 'gyp_crosscompile': False, + 'gyp_defines': '', + 'type': 'gn', + } + else: + if not config in self.configs: + raise MBErr('Config "%s" not found in %s' % + (config, self.args.config_file)) + vals = self.FlattenConfig(config) + + # Do some basic sanity checking on the config so that we + # don't have to do this in every caller. 
+ assert 'type' in vals, 'No meta-build type specified in the config' + assert vals['type'] in ('gn', 'gyp'), ( + 'Unknown meta-build type "%s"' % vals['gn_args']) + + return vals + + def ReadBotConfig(self): + if not self.args.master or not self.args.builder: + return {} + path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots', + self.args.master, self.args.builder + '.json') + if not self.Exists(path): + return {} + + contents = json.loads(self.ReadFile(path)) + gyp_vals = contents.get('GYP_DEFINES', {}) + if isinstance(gyp_vals, dict): + gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items()) + else: + gyp_defines = ' '.join(gyp_vals) + gn_args = ' '.join(contents.get('gn_args', [])) + + return { + 'args_file': '', + 'cros_passthrough': False, + 'gn_args': gn_args, + 'gyp_crosscompile': False, + 'gyp_defines': gyp_defines, + 'type': contents.get('mb_type', ''), + } + + def ReadConfigFile(self): + if not self.Exists(self.args.config_file): + raise MBErr('config file not found at %s' % self.args.config_file) + + try: + contents = ast.literal_eval(self.ReadFile(self.args.config_file)) + except SyntaxError as e: + raise MBErr('Failed to parse config file "%s": %s' % + (self.args.config_file, e)) + + self.configs = contents['configs'] + self.masters = contents['masters'] + self.mixins = contents['mixins'] + + def ConfigFromArgs(self): + if self.args.config: + if self.args.master or self.args.builder: + raise MBErr('Can not specific both -c/--config and -m/--master or ' + '-b/--builder') + + return self.args.config + + if not self.args.master or not self.args.builder: + raise MBErr('Must specify either -c/--config or ' + '(-m/--master and -b/--builder)') + + if not self.args.master in self.masters: + raise MBErr('Master name "%s" not found in "%s"' % + (self.args.master, self.args.config_file)) + + if not self.args.builder in self.masters[self.args.master]: + raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' % + 
(self.args.builder, self.args.master, self.args.config_file)) + + config = self.masters[self.args.master][self.args.builder] + if isinstance(config, list): + if self.args.phase is None: + raise MBErr('Must specify a build --phase for %s on %s' % + (self.args.builder, self.args.master)) + phase = int(self.args.phase) + if phase < 1 or phase > len(config): + raise MBErr('Phase %d out of bounds for %s on %s' % + (phase, self.args.builder, self.args.master)) + return config[phase-1] + + if self.args.phase is not None: + raise MBErr('Must not specify a build --phase for %s on %s' % + (self.args.builder, self.args.master)) + return config + + def FlattenConfig(self, config): + mixins = self.configs[config] + vals = { + 'args_file': '', + 'cros_passthrough': False, + 'gn_args': [], + 'gyp_defines': '', + 'gyp_crosscompile': False, + 'type': None, + } + + visited = [] + self.FlattenMixins(mixins, vals, visited) + return vals + + def FlattenMixins(self, mixins, vals, visited): + for m in mixins: + if m not in self.mixins: + raise MBErr('Unknown mixin "%s"' % m) + + visited.append(m) + + mixin_vals = self.mixins[m] + + if 'cros_passthrough' in mixin_vals: + vals['cros_passthrough'] = mixin_vals['cros_passthrough'] + if 'gn_args' in mixin_vals: + if vals['gn_args']: + vals['gn_args'] += ' ' + mixin_vals['gn_args'] + else: + vals['gn_args'] = mixin_vals['gn_args'] + if 'gyp_crosscompile' in mixin_vals: + vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile'] + if 'gyp_defines' in mixin_vals: + if vals['gyp_defines']: + vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines'] + else: + vals['gyp_defines'] = mixin_vals['gyp_defines'] + if 'type' in mixin_vals: + vals['type'] = mixin_vals['type'] + + if 'mixins' in mixin_vals: + self.FlattenMixins(mixin_vals['mixins'], vals, visited) + return vals + + def ClobberIfNeeded(self, vals): + path = self.args.path[0] + build_dir = self.ToAbsPath(path) + mb_type_path = self.PathJoin(build_dir, 'mb_type') + needs_clobber = False + 
new_mb_type = vals['type'] + if self.Exists(build_dir): + if self.Exists(mb_type_path): + old_mb_type = self.ReadFile(mb_type_path) + if old_mb_type != new_mb_type: + self.Print("Build type mismatch: was %s, will be %s, clobbering %s" % + (old_mb_type, new_mb_type, path)) + needs_clobber = True + else: + # There is no 'mb_type' file in the build directory, so this probably + # means that the prior build(s) were not done through mb, and we + # have no idea if this was a GYP build or a GN build. Clobber it + # to be safe. + self.Print("%s/mb_type missing, clobbering to be safe" % path) + needs_clobber = True + + if self.args.dryrun: + return + + if needs_clobber: + self.RemoveDirectory(build_dir) + + self.MaybeMakeDirectory(build_dir) + self.WriteFile(mb_type_path, new_mb_type) + + def RunGNGen(self, vals): + build_dir = self.args.path[0] + + cmd = self.GNCmd('gen', build_dir, '--check') + gn_args = self.GNArgs(vals) + + # Since GN hasn't run yet, the build directory may not even exist. + self.MaybeMakeDirectory(self.ToAbsPath(build_dir)) + + gn_args_path = self.ToAbsPath(build_dir, 'args.gn') + self.WriteFile(gn_args_path, gn_args, force_verbose=True) + + swarming_targets = [] + if getattr(self.args, 'swarming_targets_file', None): + # We need GN to generate the list of runtime dependencies for + # the compile targets listed (one per line) in the file so + # we can run them via swarming. We use ninja_to_gn.pyl to convert + # the compile targets to the matching GN labels. 
+ path = self.args.swarming_targets_file + if not self.Exists(path): + self.WriteFailureAndRaise('"%s" does not exist' % path, + output_path=None) + contents = self.ReadFile(path) + swarming_targets = set(contents.splitlines()) + gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin( + self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl'))) + gn_labels = [] + err = '' + for target in swarming_targets: + target_name = self.GNTargetName(target) + if not target_name in gn_isolate_map: + err += ('test target "%s" not found\n' % target_name) + elif gn_isolate_map[target_name]['type'] == 'unknown': + err += ('test target "%s" type is unknown\n' % target_name) + else: + gn_labels.append(gn_isolate_map[target_name]['label']) + + if err: + raise MBErr('Error: Failed to match swarming targets to %s:\n%s' % + ('//testing/buildbot/gn_isolate_map.pyl', err)) + + gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps') + self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n') + cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path) + + ret, _, _ = self.Run(cmd) + if ret: + # If `gn gen` failed, we should exit early rather than trying to + # generate isolates. Run() will have already logged any error output. + self.Print('GN gen failed: %d' % ret) + return ret + + android = 'target_os="android"' in vals['gn_args'] + for target in swarming_targets: + if android: + # Android targets may be either android_apk or executable. The former + # will result in runtime_deps associated with the stamp file, while the + # latter will result in runtime_deps associated with the executable. 
+ target_name = self.GNTargetName(target) + label = gn_isolate_map[target_name]['label'] + runtime_deps_targets = [ + target_name + '.runtime_deps', + 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')] + elif gn_isolate_map[target]['type'] == 'gpu_browser_test': + if self.platform == 'win32': + runtime_deps_targets = ['browser_tests.exe.runtime_deps'] + else: + runtime_deps_targets = ['browser_tests.runtime_deps'] + elif (gn_isolate_map[target]['type'] == 'script' or + gn_isolate_map[target].get('label_type') == 'group'): + # For script targets, the build target is usually a group, + # for which gn generates the runtime_deps next to the stamp file + # for the label, which lives under the obj/ directory. + label = gn_isolate_map[target]['label'] + runtime_deps_targets = [ + 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')] + elif self.platform == 'win32': + runtime_deps_targets = [target + '.exe.runtime_deps'] + else: + runtime_deps_targets = [target + '.runtime_deps'] + + for r in runtime_deps_targets: + runtime_deps_path = self.ToAbsPath(build_dir, r) + if self.Exists(runtime_deps_path): + break + else: + raise MBErr('did not generate any of %s' % + ', '.join(runtime_deps_targets)) + + command, extra_files = self.GetIsolateCommand(target, vals, + gn_isolate_map) + + runtime_deps = self.ReadFile(runtime_deps_path).splitlines() + + self.WriteIsolateFiles(build_dir, command, target, runtime_deps, + extra_files) + + return 0 + + def RunGNIsolate(self, vals): + gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin( + self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl'))) + + build_dir = self.args.path[0] + target = self.args.target[0] + target_name = self.GNTargetName(target) + command, extra_files = self.GetIsolateCommand(target, vals, gn_isolate_map) + + label = gn_isolate_map[target_name]['label'] + cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps') + ret, out, _ = self.Call(cmd) + if ret: + if out: + self.Print(out) + return 
ret + + runtime_deps = out.splitlines() + + self.WriteIsolateFiles(build_dir, command, target, runtime_deps, + extra_files) + + ret, _, _ = self.Run([ + self.executable, + self.PathJoin('tools', 'swarming_client', 'isolate.py'), + 'check', + '-i', + self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)), + '-s', + self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))], + buffer_output=False) + + return ret + + def WriteIsolateFiles(self, build_dir, command, target, runtime_deps, + extra_files): + isolate_path = self.ToAbsPath(build_dir, target + '.isolate') + self.WriteFile(isolate_path, + pprint.pformat({ + 'variables': { + 'command': command, + 'files': sorted(runtime_deps + extra_files), + } + }) + '\n') + + self.WriteJSON( + { + 'args': [ + '--isolated', + self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)), + '--isolate', + self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)), + ], + 'dir': self.chromium_src_dir, + 'version': 1, + }, + isolate_path + 'd.gen.json', + ) + + def GNCmd(self, subcommand, path, *args): + if self.platform == 'linux2': + subdir, exe = 'linux64', 'gn' + elif self.platform == 'darwin': + subdir, exe = 'mac', 'gn' + else: + subdir, exe = 'win', 'gn.exe' + + gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe) + + return [gn_path, subcommand, path] + list(args) + + def GNArgs(self, vals): + if vals['cros_passthrough']: + if not 'GN_ARGS' in os.environ: + raise MBErr('MB is expecting GN_ARGS to be in the environment') + gn_args = os.environ['GN_ARGS'] + if not re.search('target_os.*=.*"chromeos"', gn_args): + raise MBErr('GN_ARGS is missing target_os = "chromeos": (GN_ARGS=%s)' % + gn_args) + else: + gn_args = vals['gn_args'] + + if self.args.goma_dir: + gn_args += ' goma_dir="%s"' % self.args.goma_dir + + android_version_code = self.args.android_version_code + if android_version_code: + gn_args += ' android_default_version_code="%s"' % android_version_code + + android_version_name = 
self.args.android_version_name + if android_version_name: + gn_args += ' android_default_version_name="%s"' % android_version_name + + # Canonicalize the arg string into a sorted, newline-separated list + # of key-value pairs, and de-dup the keys if need be so that only + # the last instance of each arg is listed. + gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args)) + + args_file = vals.get('args_file', None) + if args_file: + gn_args = ('import("%s")\n' % vals['args_file']) + gn_args + return gn_args + + def RunGYPGen(self, vals): + path = self.args.path[0] + + output_dir = self.ParseGYPConfigPath(path) + cmd, env = self.GYPCmd(output_dir, vals) + ret, _, _ = self.Run(cmd, env=env) + return ret + + def RunGYPAnalyze(self, vals): + output_dir = self.ParseGYPConfigPath(self.args.path[0]) + if self.args.verbose: + inp = self.ReadInputJSON(['files', 'test_targets', + 'additional_compile_targets']) + self.Print() + self.Print('analyze input:') + self.PrintJSON(inp) + self.Print() + + cmd, env = self.GYPCmd(output_dir, vals) + cmd.extend(['-f', 'analyzer', + '-G', 'config_path=%s' % self.args.input_path[0], + '-G', 'analyzer_output_path=%s' % self.args.output_path[0]]) + ret, _, _ = self.Run(cmd, env=env) + if not ret and self.args.verbose: + outp = json.loads(self.ReadFile(self.args.output_path[0])) + self.Print() + self.Print('analyze output:') + self.PrintJSON(outp) + self.Print() + + return ret + + def GetIsolateCommand(self, target, vals, gn_isolate_map): + android = 'target_os="android"' in vals['gn_args'] + + # This needs to mirror the settings in //build/config/ui.gni: + # use_x11 = is_linux && !use_ozone. 
+ use_x11 = (self.platform == 'linux2' and + not android and + not 'use_ozone=true' in vals['gn_args']) + + asan = 'is_asan=true' in vals['gn_args'] + msan = 'is_msan=true' in vals['gn_args'] + tsan = 'is_tsan=true' in vals['gn_args'] + + target_name = self.GNTargetName(target) + test_type = gn_isolate_map[target_name]['type'] + + executable = gn_isolate_map[target_name].get('executable', target_name) + executable_suffix = '.exe' if self.platform == 'win32' else '' + + cmdline = [] + extra_files = [] + + if android and test_type != "script": + logdog_command = [ + '--logdog-bin-cmd', './../../bin/logdog_butler', + '--project', 'chromium', + '--service-account-json', + '/creds/service_accounts/service-account-luci-logdog-publisher.json', + '--prefix', 'android/swarming/logcats/${SWARMING_TASK_ID}', + '--source', '${ISOLATED_OUTDIR}/logcats', + '--name', 'unified_logcats', + ] + test_cmdline = [ + self.PathJoin('bin', 'run_%s' % target_name), + '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats', + '--target-devices-file', '${SWARMING_BOT_FILE}', + '-v' + ] + cmdline = (['./../../build/android/test_wrapper/logdog_wrapper.py'] + + logdog_command + test_cmdline) + elif use_x11 and test_type == 'windowed_test_launcher': + extra_files = [ + 'xdisplaycheck', + '../../testing/test_env.py', + '../../testing/xvfb.py', + ] + cmdline = [ + '../../testing/xvfb.py', + '.', + './' + str(executable) + executable_suffix, + '--brave-new-test-launcher', + '--test-launcher-bot-mode', + '--asan=%d' % asan, + '--msan=%d' % msan, + '--tsan=%d' % tsan, + ] + elif test_type in ('windowed_test_launcher', 'console_test_launcher'): + extra_files = [ + '../../testing/test_env.py' + ] + cmdline = [ + '../../testing/test_env.py', + './' + str(executable) + executable_suffix, + '--brave-new-test-launcher', + '--test-launcher-bot-mode', + '--asan=%d' % asan, + '--msan=%d' % msan, + '--tsan=%d' % tsan, + ] + elif test_type == 'gpu_browser_test': + extra_files = [ + '../../testing/test_env.py' + ] 
+ gtest_filter = gn_isolate_map[target]['gtest_filter'] + cmdline = [ + '../../testing/test_env.py', + './browser_tests' + executable_suffix, + '--test-launcher-bot-mode', + '--enable-gpu', + '--test-launcher-jobs=1', + '--gtest_filter=%s' % gtest_filter, + ] + elif test_type == 'script': + extra_files = [ + '../../testing/test_env.py' + ] + cmdline = [ + '../../testing/test_env.py', + '../../' + self.ToSrcRelPath(gn_isolate_map[target]['script']) + ] + elif test_type in ('raw'): + extra_files = [] + cmdline = [ + './' + str(target) + executable_suffix, + ] + + else: + self.WriteFailureAndRaise('No command line for %s found (test type %s).' + % (target, test_type), output_path=None) + + cmdline += gn_isolate_map[target_name].get('args', []) + + return cmdline, extra_files + + def ToAbsPath(self, build_path, *comps): + return self.PathJoin(self.chromium_src_dir, + self.ToSrcRelPath(build_path), + *comps) + + def ToSrcRelPath(self, path): + """Returns a relative path from the top of the repo.""" + if path.startswith('//'): + return path[2:].replace('/', self.sep) + return self.RelPath(path, self.chromium_src_dir) + + def ParseGYPConfigPath(self, path): + rpath = self.ToSrcRelPath(path) + output_dir, _, _ = rpath.rpartition(self.sep) + return output_dir + + def GYPCmd(self, output_dir, vals): + if vals['cros_passthrough']: + if not 'GYP_DEFINES' in os.environ: + raise MBErr('MB is expecting GYP_DEFINES to be in the environment') + gyp_defines = os.environ['GYP_DEFINES'] + if not 'chromeos=1' in gyp_defines: + raise MBErr('GYP_DEFINES is missing chromeos=1: (GYP_DEFINES=%s)' % + gyp_defines) + else: + gyp_defines = vals['gyp_defines'] + + goma_dir = self.args.goma_dir + + # GYP uses shlex.split() to split the gyp defines into separate arguments, + # so we can support backslashes and and spaces in arguments by quoting + # them, even on Windows, where this normally wouldn't work. 
+ if goma_dir and ('\\' in goma_dir or ' ' in goma_dir): + goma_dir = "'%s'" % goma_dir + + if goma_dir: + gyp_defines += ' gomadir=%s' % goma_dir + + android_version_code = self.args.android_version_code + if android_version_code: + gyp_defines += ' app_manifest_version_code=%s' % android_version_code + + android_version_name = self.args.android_version_name + if android_version_name: + gyp_defines += ' app_manifest_version_name=%s' % android_version_name + + cmd = [ + self.executable, + self.args.gyp_script, + '-G', + 'output_dir=' + output_dir, + ] + + # Ensure that we have an environment that only contains + # the exact values of the GYP variables we need. + env = os.environ.copy() + + # This is a terrible hack to work around the fact that + # //tools/clang/scripts/update.py is invoked by GYP and GN but + # currently relies on an environment variable to figure out + # what revision to embed in the command line #defines. + # For GN, we've made this work via a gn arg that will cause update.py + # to get an additional command line arg, but getting that to work + # via GYP_DEFINES has proven difficult, so we rewrite the GYP_DEFINES + # to get rid of the arg and add the old var in, instead. + # See crbug.com/582737 for more on this. This can hopefully all + # go away with GYP. + m = re.search('llvm_force_head_revision=1\s*', gyp_defines) + if m: + env['LLVM_FORCE_HEAD_REVISION'] = '1' + gyp_defines = gyp_defines.replace(m.group(0), '') + + # This is another terrible hack to work around the fact that + # GYP sets the link concurrency to use via the GYP_LINK_CONCURRENCY + # environment variable, and not via a proper GYP_DEFINE. See + # crbug.com/611491 for more on this. 
+ m = re.search('gyp_link_concurrency=(\d+)(\s*)', gyp_defines) + if m: + env['GYP_LINK_CONCURRENCY'] = m.group(1) + gyp_defines = gyp_defines.replace(m.group(0), '') + + env['GYP_GENERATORS'] = 'ninja' + if 'GYP_CHROMIUM_NO_ACTION' in env: + del env['GYP_CHROMIUM_NO_ACTION'] + if 'GYP_CROSSCOMPILE' in env: + del env['GYP_CROSSCOMPILE'] + env['GYP_DEFINES'] = gyp_defines + if vals['gyp_crosscompile']: + env['GYP_CROSSCOMPILE'] = '1' + return cmd, env + + def RunGNAnalyze(self, vals): + # analyze runs before 'gn gen' now, so we need to run gn gen + # in order to ensure that we have a build directory. + ret = self.RunGNGen(vals) + if ret: + return ret + + inp = self.ReadInputJSON(['files', 'test_targets', + 'additional_compile_targets']) + if self.args.verbose: + self.Print() + self.Print('analyze input:') + self.PrintJSON(inp) + self.Print() + + # TODO(crbug.com/555273) - currently GN treats targets and + # additional_compile_targets identically since we can't tell the + # difference between a target that is a group in GN and one that isn't. + # We should eventually fix this and treat the two types differently. + targets = (set(inp['test_targets']) | + set(inp['additional_compile_targets'])) + + output_path = self.args.output_path[0] + + # Bail out early if a GN file was modified, since 'gn refs' won't know + # what to do about it. Also, bail out early if 'all' was asked for, + # since we can't deal with it yet. + if (any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']) or + 'all' in targets): + self.WriteJSON({ + 'status': 'Found dependency (all)', + 'compile_targets': sorted(targets), + 'test_targets': sorted(targets & set(inp['test_targets'])), + }, output_path) + return 0 + + # This shouldn't normally happen, but could due to unusual race conditions, + # like a try job that gets scheduled before a patch lands but runs after + # the patch has landed. 
+ if not inp['files']: + self.Print('Warning: No files modified in patch, bailing out early.') + self.WriteJSON({ + 'status': 'No dependency', + 'compile_targets': [], + 'test_targets': [], + }, output_path) + return 0 + + ret = 0 + response_file = self.TempFile() + response_file.write('\n'.join(inp['files']) + '\n') + response_file.close() + + matching_targets = set() + try: + cmd = self.GNCmd('refs', + self.args.path[0], + '@%s' % response_file.name, + '--all', + '--as=output') + ret, out, _ = self.Run(cmd, force_verbose=False) + if ret and not 'The input matches no targets' in out: + self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out), + output_path) + build_dir = self.ToSrcRelPath(self.args.path[0]) + self.sep + for output in out.splitlines(): + build_output = output.replace(build_dir, '') + if build_output in targets: + matching_targets.add(build_output) + + cmd = self.GNCmd('refs', + self.args.path[0], + '@%s' % response_file.name, + '--all') + ret, out, _ = self.Run(cmd, force_verbose=False) + if ret and not 'The input matches no targets' in out: + self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out), + output_path) + for label in out.splitlines(): + build_target = label[2:] + # We want to accept 'chrome/android:chrome_public_apk' and + # just 'chrome_public_apk'. This may result in too many targets + # getting built, but we can adjust that later if need be. 
+ for input_target in targets: + if (input_target == build_target or + build_target.endswith(':' + input_target)): + matching_targets.add(input_target) + finally: + self.RemoveFile(response_file.name) + + if matching_targets: + self.WriteJSON({ + 'status': 'Found dependency', + 'compile_targets': sorted(matching_targets), + 'test_targets': sorted(matching_targets & + set(inp['test_targets'])), + }, output_path) + else: + self.WriteJSON({ + 'status': 'No dependency', + 'compile_targets': [], + 'test_targets': [], + }, output_path) + + if self.args.verbose: + outp = json.loads(self.ReadFile(output_path)) + self.Print() + self.Print('analyze output:') + self.PrintJSON(outp) + self.Print() + + return 0 + + def ReadInputJSON(self, required_keys): + path = self.args.input_path[0] + output_path = self.args.output_path[0] + if not self.Exists(path): + self.WriteFailureAndRaise('"%s" does not exist' % path, output_path) + + try: + inp = json.loads(self.ReadFile(path)) + except Exception as e: + self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' % + (path, e), output_path) + + for k in required_keys: + if not k in inp: + self.WriteFailureAndRaise('input file is missing a "%s" key' % k, + output_path) + + return inp + + def WriteFailureAndRaise(self, msg, output_path): + if output_path: + self.WriteJSON({'error': msg}, output_path, force_verbose=True) + raise MBErr(msg) + + def WriteJSON(self, obj, path, force_verbose=False): + try: + self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n', + force_verbose=force_verbose) + except Exception as e: + raise MBErr('Error %s writing to the output path "%s"' % + (e, path)) + + def CheckCompile(self, master, builder): + url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1' + url = urllib2.quote(url_template.format(master=master, builder=builder), + safe=':/()?=') + try: + builds = json.loads(self.Fetch(url)) + except Exception as e: + return str(e) + successes = sorted( + [int(x) 
for x in builds.keys() if "text" in builds[x] and + cmp(builds[x]["text"][:2], ["build", "successful"]) == 0], + reverse=True) + if not successes: + return "no successful builds" + build = builds[str(successes[0])] + step_names = set([step["name"] for step in build["steps"]]) + compile_indicators = set(["compile", "compile (with patch)", "analyze"]) + if compile_indicators & step_names: + return "compiles" + return "does not compile" + + def PrintCmd(self, cmd, env): + if self.platform == 'win32': + env_prefix = 'set ' + env_quoter = QuoteForSet + shell_quoter = QuoteForCmd + else: + env_prefix = '' + env_quoter = pipes.quote + shell_quoter = pipes.quote + + def print_env(var): + if env and var in env: + self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var]))) + + print_env('GYP_CROSSCOMPILE') + print_env('GYP_DEFINES') + print_env('GYP_LINK_CONCURRENCY') + print_env('LLVM_FORCE_HEAD_REVISION') + + if cmd[0] == self.executable: + cmd = ['python'] + cmd[1:] + self.Print(*[shell_quoter(arg) for arg in cmd]) + + def PrintJSON(self, obj): + self.Print(json.dumps(obj, indent=2, sort_keys=True)) + + def GNTargetName(self, target): + return target + + def Build(self, target): + build_dir = self.ToSrcRelPath(self.args.path[0]) + ninja_cmd = ['ninja', '-C', build_dir] + if self.args.jobs: + ninja_cmd.extend(['-j', '%d' % self.args.jobs]) + ninja_cmd.append(target) + ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False) + return ret + + def Run(self, cmd, env=None, force_verbose=True, buffer_output=True): + # This function largely exists so it can be overridden for testing. 
+ if self.args.dryrun or self.args.verbose or force_verbose: + self.PrintCmd(cmd, env) + if self.args.dryrun: + return 0, '', '' + + ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output) + if self.args.verbose or force_verbose: + if ret: + self.Print(' -> returned %d' % ret) + if out: + self.Print(out, end='') + if err: + self.Print(err, end='', file=sys.stderr) + return ret, out, err + + def Call(self, cmd, env=None, buffer_output=True): + if buffer_output: + p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=env) + out, err = p.communicate() + else: + p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir, + env=env) + p.wait() + out = err = '' + return p.returncode, out, err + + def ExpandUser(self, path): + # This function largely exists so it can be overridden for testing. + return os.path.expanduser(path) + + def Exists(self, path): + # This function largely exists so it can be overridden for testing. + return os.path.exists(path) + + def Fetch(self, url): + # This function largely exists so it can be overridden for testing. + f = urllib2.urlopen(url) + contents = f.read() + f.close() + return contents + + def MaybeMakeDirectory(self, path): + try: + os.makedirs(path) + except OSError, e: + if e.errno != errno.EEXIST: + raise + + def PathJoin(self, *comps): + # This function largely exists so it can be overriden for testing. + return os.path.join(*comps) + + def Print(self, *args, **kwargs): + # This function largely exists so it can be overridden for testing. + print(*args, **kwargs) + if kwargs.get('stream', sys.stdout) == sys.stdout: + sys.stdout.flush() + + def ReadFile(self, path): + # This function largely exists so it can be overriden for testing. + with open(path) as fp: + return fp.read() + + def RelPath(self, path, start='.'): + # This function largely exists so it can be overriden for testing. 
+ return os.path.relpath(path, start) + + def RemoveFile(self, path): + # This function largely exists so it can be overriden for testing. + os.remove(path) + + def RemoveDirectory(self, abs_path): + if self.platform == 'win32': + # In other places in chromium, we often have to retry this command + # because we're worried about other processes still holding on to + # file handles, but when MB is invoked, it will be early enough in the + # build that their should be no other processes to interfere. We + # can change this if need be. + self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path]) + else: + shutil.rmtree(abs_path, ignore_errors=True) + + def TempFile(self, mode='w'): + # This function largely exists so it can be overriden for testing. + return tempfile.NamedTemporaryFile(mode=mode, delete=False) + + def WriteFile(self, path, contents, force_verbose=False): + # This function largely exists so it can be overriden for testing. + if self.args.dryrun or self.args.verbose or force_verbose: + self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path)) + with open(path, 'w') as fp: + return fp.write(contents) + + +class MBErr(Exception): + pass + + +# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful +# details of this next section, which handles escaping command lines +# so that they can be copied and pasted into a cmd window. +UNSAFE_FOR_SET = set('^<>&|') +UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%')) +ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"')) + + +def QuoteForSet(arg): + if any(a in UNSAFE_FOR_SET for a in arg): + arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg) + return arg + + +def QuoteForCmd(arg): + # First, escape the arg so that CommandLineToArgvW will parse it properly. + # From //tools/gyp/pylib/gyp/msvs_emulation.py:23. 
+ if arg == '' or ' ' in arg or '"' in arg: + quote_re = re.compile(r'(\\*)"') + arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)) + + # Then check to see if the arg contains any metacharacters other than + # double quotes; if it does, quote everything (including the double + # quotes) for safety. + if any(a in UNSAFE_FOR_CMD for a in arg): + arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg) + return arg + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py new file mode 100755 index 00000000000000..ac58c0284f986f --- /dev/null +++ b/deps/v8/tools/mb/mb_unittest.py @@ -0,0 +1,572 @@ +#!/usr/bin/python +# Copyright 2016 the V8 project authors. All rights reserved. +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Tests for mb.py.""" + +import json +import StringIO +import os +import sys +import unittest + +import mb + + +class FakeMBW(mb.MetaBuildWrapper): + def __init__(self, win32=False): + super(FakeMBW, self).__init__() + + # Override vars for test portability. 
+ if win32: + self.chromium_src_dir = 'c:\\fake_src' + self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl' + self.platform = 'win32' + self.executable = 'c:\\python\\python.exe' + self.sep = '\\' + else: + self.chromium_src_dir = '/fake_src' + self.default_config = '/fake_src/tools/mb/mb_config.pyl' + self.executable = '/usr/bin/python' + self.platform = 'linux2' + self.sep = '/' + + self.files = {} + self.calls = [] + self.cmds = [] + self.cross_compile = None + self.out = '' + self.err = '' + self.rmdirs = [] + + def ExpandUser(self, path): + return '$HOME/%s' % path + + def Exists(self, path): + return self.files.get(path) is not None + + def MaybeMakeDirectory(self, path): + self.files[path] = True + + def PathJoin(self, *comps): + return self.sep.join(comps) + + def ReadFile(self, path): + return self.files[path] + + def WriteFile(self, path, contents, force_verbose=False): + if self.args.dryrun or self.args.verbose or force_verbose: + self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path)) + self.files[path] = contents + + def Call(self, cmd, env=None, buffer_output=True): + if env: + self.cross_compile = env.get('GYP_CROSSCOMPILE') + self.calls.append(cmd) + if self.cmds: + return self.cmds.pop(0) + return 0, '', '' + + def Print(self, *args, **kwargs): + sep = kwargs.get('sep', ' ') + end = kwargs.get('end', '\n') + f = kwargs.get('file', sys.stdout) + if f == sys.stderr: + self.err += sep.join(args) + end + else: + self.out += sep.join(args) + end + + def TempFile(self, mode='w'): + return FakeFile(self.files) + + def RemoveFile(self, path): + del self.files[path] + + def RemoveDirectory(self, path): + self.rmdirs.append(path) + files_to_delete = [f for f in self.files if f.startswith(path)] + for f in files_to_delete: + self.files[f] = None + + +class FakeFile(object): + def __init__(self, files): + self.name = '/tmp/file' + self.buf = '' + self.files = files + + def write(self, contents): + self.buf += contents + + def close(self): + 
self.files[self.name] = self.buf + + +TEST_CONFIG = """\ +{ + 'masters': { + 'chromium': {}, + 'fake_master': { + 'fake_builder': 'gyp_rel_bot', + 'fake_gn_builder': 'gn_rel_bot', + 'fake_gyp_crosscompile_builder': 'gyp_crosscompile', + 'fake_gn_debug_builder': 'gn_debug_goma', + 'fake_gyp_builder': 'gyp_debug', + 'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn', + 'fake_multi_phase': ['gn_phase_1', 'gn_phase_2'], + }, + }, + 'configs': { + 'gyp_rel_bot': ['gyp', 'rel', 'goma'], + 'gn_debug_goma': ['gn', 'debug', 'goma'], + 'gyp_debug': ['gyp', 'debug', 'fake_feature1'], + 'gn_rel_bot': ['gn', 'rel', 'goma'], + 'gyp_crosscompile': ['gyp', 'crosscompile'], + 'gn_phase_1': ['gn', 'phase_1'], + 'gn_phase_2': ['gn', 'phase_2'], + }, + 'mixins': { + 'crosscompile': { + 'gyp_crosscompile': True, + }, + 'fake_feature1': { + 'gn_args': 'enable_doom_melon=true', + 'gyp_defines': 'doom_melon=1', + }, + 'gyp': {'type': 'gyp'}, + 'gn': {'type': 'gn'}, + 'goma': { + 'gn_args': 'use_goma=true', + 'gyp_defines': 'goma=1', + }, + 'phase_1': { + 'gn_args': 'phase=1', + 'gyp_args': 'phase=1', + }, + 'phase_2': { + 'gn_args': 'phase=2', + 'gyp_args': 'phase=2', + }, + 'rel': { + 'gn_args': 'is_debug=false', + }, + 'debug': { + 'gn_args': 'is_debug=true', + }, + }, +} +""" + + +TEST_BAD_CONFIG = """\ +{ + 'configs': { + 'gn_rel_bot_1': ['gn', 'rel', 'chrome_with_codecs'], + 'gn_rel_bot_2': ['gn', 'rel', 'bad_nested_config'], + }, + 'masters': { + 'chromium': { + 'a': 'gn_rel_bot_1', + 'b': 'gn_rel_bot_2', + }, + }, + 'mixins': { + 'gn': {'type': 'gn'}, + 'chrome_with_codecs': { + 'gn_args': 'proprietary_codecs=true', + }, + 'bad_nested_config': { + 'mixins': ['chrome_with_codecs'], + }, + 'rel': { + 'gn_args': 'is_debug=false', + }, + }, +} +""" + + +GYP_HACKS_CONFIG = """\ +{ + 'masters': { + 'chromium': {}, + 'fake_master': { + 'fake_builder': 'fake_config', + }, + }, + 'configs': { + 'fake_config': ['fake_mixin'], + }, + 'mixins': { + 'fake_mixin': { + 
'type': 'gyp', + 'gn_args': '', + 'gyp_defines': + ('foo=bar llvm_force_head_revision=1 ' + 'gyp_link_concurrency=1 baz=1'), + }, + }, +} +""" + + +class UnitTest(unittest.TestCase): + def fake_mbw(self, files=None, win32=False): + mbw = FakeMBW(win32=win32) + mbw.files.setdefault(mbw.default_config, TEST_CONFIG) + mbw.files.setdefault( + mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'), + 'is_debug = false\n') + if files: + for path, contents in files.items(): + mbw.files[path] = contents + return mbw + + def check(self, args, mbw=None, files=None, out=None, err=None, ret=None): + if not mbw: + mbw = self.fake_mbw(files) + + actual_ret = mbw.Main(args) + + self.assertEqual(actual_ret, ret) + if out is not None: + self.assertEqual(mbw.out, out) + if err is not None: + self.assertEqual(mbw.err, err) + return mbw + + def test_clobber(self): + files = { + '/fake_src/out/Debug': None, + '/fake_src/out/Debug/mb_type': None, + } + mbw = self.fake_mbw(files) + + # The first time we run this, the build dir doesn't exist, so no clobber. + self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, []) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn') + + # The second time we run this, the build dir exists and matches, so no + # clobber. + self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, []) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn') + + # Now we switch build types; this should result in a clobber. + self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug']) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp') + + # Now we delete mb_type; this checks the case where the build dir + # exists but wasn't populated by mb; this should also result in a clobber. 
+ del mbw.files['/fake_src/out/Debug/mb_type'] + self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, + ['/fake_src/out/Debug', '/fake_src/out/Debug']) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp') + + def test_gn_analyze(self): + files = {'/tmp/in.json': """{\ + "files": ["foo/foo_unittest.cc"], + "test_targets": ["foo_unittests", "bar_unittests"], + "additional_compile_targets": [] + }"""} + + mbw = self.fake_mbw(files) + mbw.Call = lambda cmd, env=None, buffer_output=True: ( + 0, 'out/Default/foo_unittests\n', '') + + self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default', + '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) + out = json.loads(mbw.files['/tmp/out.json']) + self.assertEqual(out, { + 'status': 'Found dependency', + 'compile_targets': ['foo_unittests'], + 'test_targets': ['foo_unittests'] + }) + + def test_gn_analyze_fails(self): + files = {'/tmp/in.json': """{\ + "files": ["foo/foo_unittest.cc"], + "test_targets": ["foo_unittests", "bar_unittests"], + "additional_compile_targets": [] + }"""} + + mbw = self.fake_mbw(files) + mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '') + + self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default', + '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=1) + + def test_gn_analyze_all(self): + files = {'/tmp/in.json': """{\ + "files": ["foo/foo_unittest.cc"], + "test_targets": ["bar_unittests"], + "additional_compile_targets": ["all"] + }"""} + mbw = self.fake_mbw(files) + mbw.Call = lambda cmd, env=None, buffer_output=True: ( + 0, 'out/Default/foo_unittests\n', '') + self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default', + '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) + out = json.loads(mbw.files['/tmp/out.json']) + self.assertEqual(out, { + 'status': 'Found dependency (all)', + 'compile_targets': ['all', 'bar_unittests'], + 'test_targets': ['bar_unittests'], + }) + + def test_gn_analyze_missing_file(self): + 
files = {'/tmp/in.json': """{\ + "files": ["foo/foo_unittest.cc"], + "test_targets": ["bar_unittests"], + "additional_compile_targets": [] + }"""} + mbw = self.fake_mbw(files) + mbw.cmds = [ + (0, '', ''), + (1, 'The input matches no targets, configs, or files\n', ''), + (1, 'The input matches no targets, configs, or files\n', ''), + ] + + self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default', + '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) + out = json.loads(mbw.files['/tmp/out.json']) + self.assertEqual(out, { + 'status': 'No dependency', + 'compile_targets': [], + 'test_targets': [], + }) + + def test_gn_gen(self): + mbw = self.fake_mbw() + self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'], + mbw=mbw, ret=0) + self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'], + ('goma_dir = "/goma"\n' + 'is_debug = true\n' + 'use_goma = true\n')) + + # Make sure we log both what is written to args.gn and the command line. + self.assertIn('Writing """', mbw.out) + self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check', + mbw.out) + + mbw = self.fake_mbw(win32=True) + self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'], + mbw=mbw, ret=0) + self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'], + ('goma_dir = "c:\\\\goma"\n' + 'is_debug = true\n' + 'use_goma = true\n')) + self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug ' + '--check\n', mbw.out) + + mbw = self.fake_mbw() + self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot', + '//out/Debug'], + mbw=mbw, ret=0) + self.assertEqual( + mbw.files['/fake_src/out/Debug/args.gn'], + 'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n') + + + def test_gn_gen_fails(self): + mbw = self.fake_mbw() + mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '') + self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1) + + def test_gn_gen_swarming(self): + files = 
{ + '/tmp/swarming_targets': 'base_unittests\n', + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'raw'," + " 'args': []," + "}}\n" + ), + '/fake_src/out/Default/base_unittests.runtime_deps': ( + "base_unittests\n" + ), + } + mbw = self.fake_mbw(files) + self.check(['gen', + '-c', 'gn_debug_goma', + '--swarming-targets-file', '/tmp/swarming_targets', + '//out/Default'], mbw=mbw, ret=0) + self.assertIn('/fake_src/out/Default/base_unittests.isolate', + mbw.files) + self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json', + mbw.files) + + def test_gn_isolate(self): + files = { + '/fake_src/out/Default/toolchain.ninja': "", + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'raw'," + " 'args': []," + "}}\n" + ), + '/fake_src/out/Default/base_unittests.runtime_deps': ( + "base_unittests\n" + ), + } + self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default', + 'base_unittests'], files=files, ret=0) + + # test running isolate on an existing build_dir + files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n' + self.check(['isolate', '//out/Default', 'base_unittests'], + files=files, ret=0) + + files['/fake_src/out/Default/mb_type'] = 'gn\n' + self.check(['isolate', '//out/Default', 'base_unittests'], + files=files, ret=0) + + def test_gn_run(self): + files = { + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'raw'," + " 'args': []," + "}}\n" + ), + '/fake_src/out/Default/base_unittests.runtime_deps': ( + "base_unittests\n" + ), + } + self.check(['run', '-c', 'gn_debug_goma', '//out/Default', + 'base_unittests'], files=files, ret=0) + + def test_gn_lookup(self): + self.check(['lookup', '-c', 'gn_debug_goma'], ret=0) + + def test_gn_lookup_goma_dir_expansion(self): + self.check(['lookup', '-c', 'gn_rel_bot', '-g', 
'/foo'], ret=0, + out=('\n' + 'Writing """\\\n' + 'goma_dir = "/foo"\n' + 'is_debug = false\n' + 'use_goma = true\n' + '""" to _path_/args.gn.\n\n' + '/fake_src/buildtools/linux64/gn gen _path_\n')) + + def test_gyp_analyze(self): + mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release', + '/tmp/in.json', '/tmp/out.json'], ret=0) + self.assertIn('analyzer', mbw.calls[0]) + + def test_gyp_crosscompile(self): + mbw = self.fake_mbw() + self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'], + mbw=mbw, ret=0) + self.assertTrue(mbw.cross_compile) + + def test_gyp_gen(self): + self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'], + ret=0, + out=("GYP_DEFINES='goma=1 gomadir=/goma'\n" + "python build/gyp_chromium -G output_dir=out\n")) + + mbw = self.fake_mbw(win32=True) + self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'], + mbw=mbw, ret=0, + out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n" + "python build\\gyp_chromium -G output_dir=out\n")) + + def test_gyp_gen_fails(self): + mbw = self.fake_mbw() + mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '') + self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1) + + def test_gyp_lookup_goma_dir_expansion(self): + self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0, + out=("GYP_DEFINES='goma=1 gomadir=/foo'\n" + "python build/gyp_chromium -G output_dir=_path_\n")) + + def test_help(self): + orig_stdout = sys.stdout + try: + sys.stdout = StringIO.StringIO() + self.assertRaises(SystemExit, self.check, ['-h']) + self.assertRaises(SystemExit, self.check, ['help']) + self.assertRaises(SystemExit, self.check, ['help', 'gen']) + finally: + sys.stdout = orig_stdout + + def test_multiple_phases(self): + # Check that not passing a --phase to a multi-phase builder fails. 
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'], + ret=1) + self.assertIn('Must specify a build --phase', mbw.out) + + # Check that passing a --phase to a single-phase builder fails. + mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder', + '--phase', '1'], + ret=1) + self.assertIn('Must not specify a build --phase', mbw.out) + + # Check different ranges; 0 and 3 are out of bounds, 1 and 2 should work. + mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + '--phase', '0'], ret=1) + self.assertIn('Phase 0 out of bounds', mbw.out) + + mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + '--phase', '1'], ret=0) + self.assertIn('phase = 1', mbw.out) + + mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + '--phase', '2'], ret=0) + self.assertIn('phase = 2', mbw.out) + + mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + '--phase', '3'], ret=1) + self.assertIn('Phase 3 out of bounds', mbw.out) + + def test_validate(self): + mbw = self.fake_mbw() + self.check(['validate'], mbw=mbw, ret=0) + + def test_gyp_env_hacks(self): + mbw = self.fake_mbw() + mbw.files[mbw.default_config] = GYP_HACKS_CONFIG + self.check(['lookup', '-c', 'fake_config'], mbw=mbw, + ret=0, + out=("GYP_DEFINES='foo=bar baz=1'\n" + "GYP_LINK_CONCURRENCY=1\n" + "LLVM_FORCE_HEAD_REVISION=1\n" + "python build/gyp_chromium -G output_dir=_path_\n")) + + +if __name__ == '__main__': + unittest.main() + + def test_validate(self): + mbw = self.fake_mbw() + self.check(['validate'], mbw=mbw, ret=0) + + def test_bad_validate(self): + mbw = self.fake_mbw() + mbw.files[mbw.default_config] = TEST_BAD_CONFIG + self.check(['validate'], mbw=mbw, ret=1) + + def test_gyp_env_hacks(self): + mbw = self.fake_mbw() + mbw.files[mbw.default_config] = GYP_HACKS_CONFIG + self.check(['lookup', '-c', 'fake_config'], mbw=mbw, + ret=0, + out=("GYP_DEFINES='foo=bar baz=1'\n" + 
"GYP_LINK_CONCURRENCY=1\n" + "LLVM_FORCE_HEAD_REVISION=1\n" + "python build/gyp_chromium -G output_dir=_path_\n")) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/v8/tools/memory/asan/blacklist.txt b/deps/v8/tools/memory/asan/blacklist.txt new file mode 100644 index 00000000000000..2bb1aa9714f13c --- /dev/null +++ b/deps/v8/tools/memory/asan/blacklist.txt @@ -0,0 +1,4 @@ +# The rules in this file are only applied at compile time. If you can modify the +# source in question, consider function attributes to disable instrumentation. +# +# Please think twice before you add or remove these rules. \ No newline at end of file diff --git a/deps/v8/tools/memory/tsan_v2/ignores.txt b/deps/v8/tools/memory/tsan_v2/ignores.txt new file mode 100644 index 00000000000000..80babf4894eecf --- /dev/null +++ b/deps/v8/tools/memory/tsan_v2/ignores.txt @@ -0,0 +1,5 @@ +# The rules in this file are only applied at compile time. If you can modify the +# source in question, consider function attributes to disable instrumentation. +# +# Please think twice before you add or remove these rules. +# Data races should typically go to suppressions.txt. \ No newline at end of file diff --git a/deps/v8/tools/mingw-generate-makefiles.sh b/deps/v8/tools/mingw-generate-makefiles.sh index 32af52d39e004c..67715fc15b0a47 100755 --- a/deps/v8/tools/mingw-generate-makefiles.sh +++ b/deps/v8/tools/mingw-generate-makefiles.sh @@ -27,7 +27,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Monkey-patch GYP. -cat > build/gyp/gyp.mingw << EOF +cat > tools/gyp/gyp.mingw << EOF #!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. @@ -74,11 +74,11 @@ EOF find out -name '*.mk' -or -name 'Makefile*' -exec rm {} \; # Generate fresh Makefiles. 
-mv build/gyp/gyp build/gyp/gyp.original -mv build/gyp/gyp.mingw build/gyp/gyp +mv tools/gyp/gyp tools/gyp/gyp.original +mv tools/gyp/gyp.mingw tools/gyp/gyp make out/Makefile.ia32 -mv build/gyp/gyp build/gyp/gyp.mingw -mv build/gyp/gyp.original build/gyp/gyp +mv tools/gyp/gyp tools/gyp/gyp.mingw +mv tools/gyp/gyp.original tools/gyp/gyp # Patch generated Makefiles: replace most backslashes with forward slashes, # fix library names in linker flags. diff --git a/deps/v8/tools/msan/blacklist.txt b/deps/v8/tools/msan/blacklist.txt new file mode 100644 index 00000000000000..2bb1aa9714f13c --- /dev/null +++ b/deps/v8/tools/msan/blacklist.txt @@ -0,0 +1,4 @@ +# The rules in this file are only applied at compile time. If you can modify the +# source in question, consider function attributes to disable instrumentation. +# +# Please think twice before you add or remove these rules. \ No newline at end of file diff --git a/deps/v8/tools/nacl-run.py b/deps/v8/tools/nacl-run.py deleted file mode 100755 index 32055feb0f269d..00000000000000 --- a/deps/v8/tools/nacl-run.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# This script executes the passed command line using the Native Client -# 'sel_ldr' container. It is derived from android-run.py. - -import os -from os.path import join, dirname, abspath -import re -import subprocess -import sys -import tempfile - -def Check(output, errors): - failed = any([s.startswith('/system/bin/sh:') or s.startswith('ANDROID') - for s in output.split('\n')]) - return 1 if failed else 0 - -def Execute(cmdline): - (fd_out, outname) = tempfile.mkstemp() - (fd_err, errname) = tempfile.mkstemp() - process = subprocess.Popen( - args=cmdline, - shell=True, - stdout=fd_out, - stderr=fd_err, - ) - exit_code = process.wait() - os.close(fd_out) - os.close(fd_err) - output = file(outname).read() - errors = file(errname).read() - os.unlink(outname) - os.unlink(errname) - sys.stdout.write(output) - sys.stderr.write(errors) - return exit_code or Check(output, errors) - -def Escape(arg): - def ShouldEscape(): - for x in arg: - if not x.isalnum() and x != '-' and x != '_': - return True - return False - - return arg if not ShouldEscape() else '"%s"' % (arg.replace('"', '\\"')) - -def WriteToTemporaryFile(data): - (fd, fname) = tempfile.mkstemp() - os.close(fd) - tmp_file = 
open(fname, "w") - tmp_file.write(data) - tmp_file.close() - return fname - -def GetNaClArchFromNexe(nexe): - try: - p = subprocess.Popen(['file', nexe], stdout=subprocess.PIPE) - out, err = p.communicate() - lines = [re.sub("\s+", " " , line) for line in out.split('\n')] - if lines[0].find(": ELF 32-bit LSB executable, Intel 80386") > 0: - return "x86_32" - if lines[0].find(": ELF 64-bit LSB executable, x86-64") > 0: - return "x86_64" - except: - print 'file ' + sys.argv[1] + ' failed' - return None - -def GetNaClResources(nexe): - nacl_sdk_dir = os.environ["NACL_SDK_ROOT"] - nacl_arch = GetNaClArchFromNexe(nexe) - if sys.platform.startswith("linux"): - platform = "linux" - elif sys.platform == "darwin": - platform = "mac" - else: - print("NaCl V8 testing is supported on Linux and MacOS only.") - sys.exit(1) - - if nacl_arch is "x86_64": - toolchain = platform + "_x86_glibc" - sel_ldr = "sel_ldr_x86_64" - irt = "irt_core_x86_64.nexe" - libdir = "lib64" - elif nacl_arch is "x86_32": - toolchain = platform + "_x86_glibc" - sel_ldr = "sel_ldr_x86_32" - irt = "irt_core_x86_32.nexe" - libdir = "lib32" - elif nacl_arch is "arm": - print("NaCl V8 ARM support is not ready yet.") - sys.exit(1) - else: - print("Invalid nexe %s with NaCl arch %s" % (nexe, nacl_arch)) - sys.exit(1) - - nacl_sel_ldr = os.path.join(nacl_sdk_dir, "tools", sel_ldr) - nacl_irt = os.path.join(nacl_sdk_dir, "tools", irt) - - return (nacl_sdk_dir, nacl_sel_ldr, nacl_irt) - -def Main(): - if (len(sys.argv) == 1): - print("Usage: %s " % sys.argv[0]) - return 1 - - args = [Escape(arg) for arg in sys.argv[1:]] - - (nacl_sdk_dir, nacl_sel_ldr, nacl_irt) = GetNaClResources(sys.argv[1]) - - # sel_ldr Options: - # -c -c: disable validation (for performance) - # -a: allow file access - # -B : load the IRT - command = ' '.join([nacl_sel_ldr, '-c', '-c', '-a', '-B', nacl_irt, '--'] + - args) - error_code = Execute(command) - return error_code - -if __name__ == '__main__': - sys.exit(Main()) diff --git 
a/deps/v8/tools/oom_dump/SConstruct b/deps/v8/tools/objdump-v8 old mode 100644 new mode 100755 similarity index 51% rename from deps/v8/tools/oom_dump/SConstruct rename to deps/v8/tools/objdump-v8 index f228c8907608f4..25ec4745e64a6f --- a/deps/v8/tools/oom_dump/SConstruct +++ b/deps/v8/tools/objdump-v8 @@ -1,4 +1,6 @@ -# Copyright 2010 the V8 project authors. All rights reserved. +#!/usr/bin/env python +# +# Copyright 2016 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: @@ -25,18 +27,57 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -vars = Variables('custom.py') -vars.Add(PathVariable('BREAKPAD_DIR', - 'Path to checkout of google-breakpad project', - '~/google-breakpad', - PathVariable.PathIsDir)) -vars.Add(PathVariable('V8_DIR', - 'Path to checkout of v8 project', - '../..', - PathVariable.PathIsDir)) - -env = Environment(variables = vars, - CPPPATH = ['${BREAKPAD_DIR}/src', '${V8_DIR}/src'], - LIBPATH = ['/usr/local/lib', '${V8_DIR}']) - -env.Program('oom_dump.cc', LIBS = ['breakpad', 'v8', 'pthread']) +import os.path +import re +import subprocess +import sys + + +def get_address_bounds(): + start = -1 + end = -1 + for arg in sys.argv: + if arg.startswith("--start-address="): + start = int(arg[-12:], 16) + if arg.startswith("--stop-address="): + end = int(arg[-12:], 16) + return start, end + + +def format_line(line): + pieces = line.split(None, 3) + return " " + pieces[0][2:] + ":\t" + pieces[3] + + +def is_comment(line): + stripped = line.strip() + return stripped.startswith("--") or stripped.startswith(";;") + +def main(): + filename = sys.argv[-1] + match = re.match(r"/tmp/perf-(.*)\.map", filename) + if match: + start, end = get_address_bounds() + process_codefile = "code-" + match.group(1) + "-1.asm" + if 
os.path.exists(process_codefile): + codefile = open(process_codefile, "r") + else: + codefile = open("code.asm", "r") + with codefile: + printing = False + for line in codefile: + if line.startswith("0x"): + addr = int(line.split()[0], 0) + if start <= addr <= end: + printing = True + sys.stdout.write(format_line(line)) + elif printing: + break + elif printing and not is_comment(line): + break + else: + sys.argv[0] = "objdump" + sys.exit(subprocess.call(sys.argv)) + +if __name__ == "__main__": + main() diff --git a/deps/v8/tools/oom_dump/README b/deps/v8/tools/oom_dump/README deleted file mode 100644 index 1d840b9a9ce04f..00000000000000 --- a/deps/v8/tools/oom_dump/README +++ /dev/null @@ -1,33 +0,0 @@ -oom_dump extracts useful information from Google Chrome OOM minidumps. - -To build one needs a google-breakpad checkout -(http://code.google.com/p/google-breakpad/). - -First, one needs to build and install breakpad itself. For instructions -check google-breakpad, but currently it's as easy as: - - ./configure - make - sudo make install - -(the catch: breakpad installs .so into /usr/local/lib, so you might -need some additional tweaking to make it discoverable, for example, -put a soft link into /usr/lib directory). - -Next step is to build v8. Note: you should build x64 version of v8, -if you're on 64-bit platform, otherwise you would get a link error when -building oom_dump. Also, if you are testing against an older version of chrome -you should build the corresponding version of V8 to make sure that the type-id -enum have the correct values. - -The last step is to build oom_dump itself. The following command should work: - - cd /tools/oom_dump - scons BREAKPAD_DIR= - -(Additionally you can control v8 working copy dir, but the default should work.) - -If everything goes fine, oom_dump should print -some useful information about the OOM crash. - -Note: currently only 32-bit Windows minidumps are supported. 
diff --git a/deps/v8/tools/oom_dump/oom_dump.cc b/deps/v8/tools/oom_dump/oom_dump.cc deleted file mode 100644 index 581e1914e2e345..00000000000000 --- a/deps/v8/tools/oom_dump/oom_dump.cc +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include -#include - -#include - -#include - -#include - -namespace { - -using google_breakpad::Minidump; -using google_breakpad::MinidumpContext; -using google_breakpad::MinidumpThread; -using google_breakpad::MinidumpThreadList; -using google_breakpad::MinidumpException; -using google_breakpad::MinidumpMemoryRegion; - -const char* InstanceTypeToString(int type) { - static char const* names[v8::internal::LAST_TYPE] = {0}; - if (names[v8::internal::STRING_TYPE] == NULL) { - using namespace v8::internal; -#define SET(type) names[type] = #type; - INSTANCE_TYPE_LIST(SET) -#undef SET - } - return names[type]; -} - - -u_int32_t ReadPointedValue(MinidumpMemoryRegion* region, - u_int64_t base, - int offset) { - u_int32_t ptr = 0; - CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr)); - u_int32_t value = 0; - CHECK(region->GetMemoryAtAddress(ptr, &value)); - return value; -} - - -void ReadArray(MinidumpMemoryRegion* region, - u_int64_t array_ptr, - int size, - int* output) { - for (int i = 0; i < size; i++) { - u_int32_t value; - CHECK(region->GetMemoryAtAddress(array_ptr + 4 * i, &value)); - output[i] = value; - } -} - - -u_int32_t ReadArrayFrom(MinidumpMemoryRegion* region, - u_int64_t base, - int offset, - int size, - int* output) { - u_int32_t ptr = 0; - CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr)); - ReadArray(region, ptr, size, output); -} - - -double toM(int size) { - return size / (1024. 
* 1024.); -} - - -class IndirectSorter { - public: - explicit IndirectSorter(int* a) : a_(a) { } - - bool operator() (int i0, int i1) { - return a_[i0] > a_[i1]; - } - - private: - int* a_; -}; - - -void DumpHeapStats(const char *minidump_file) { - Minidump minidump(minidump_file); - CHECK(minidump.Read()); - - MinidumpException *exception = minidump.GetException(); - CHECK(exception); - - MinidumpContext* crash_context = exception->GetContext(); - CHECK(crash_context); - - u_int32_t exception_thread_id = 0; - CHECK(exception->GetThreadID(&exception_thread_id)); - - MinidumpThreadList* thread_list = minidump.GetThreadList(); - CHECK(thread_list); - - MinidumpThread* exception_thread = - thread_list->GetThreadByID(exception_thread_id); - CHECK(exception_thread); - - // Currently only 32-bit Windows minidumps are supported. - CHECK_EQ(MD_CONTEXT_X86, crash_context->GetContextCPU()); - - const MDRawContextX86* contextX86 = crash_context->GetContextX86(); - CHECK(contextX86); - - const u_int32_t esp = contextX86->esp; - - MinidumpMemoryRegion* memory_region = exception_thread->GetMemory(); - CHECK(memory_region); - - const u_int64_t last = memory_region->GetBase() + memory_region->GetSize(); - - u_int64_t heap_stats_addr = 0; - for (u_int64_t addr = esp; addr < last; addr += 4) { - u_int32_t value = 0; - CHECK(memory_region->GetMemoryAtAddress(addr, &value)); - if (value >= esp && value < last) { - u_int32_t value2 = 0; - CHECK(memory_region->GetMemoryAtAddress(value, &value2)); - if (value2 == v8::internal::HeapStats::kStartMarker) { - heap_stats_addr = addr; - break; - } - } - } - CHECK(heap_stats_addr); - - // Read heap stats. 
- -#define READ_FIELD(offset) \ - ReadPointedValue(memory_region, heap_stats_addr, offset) - - CHECK(READ_FIELD(0) == v8::internal::HeapStats::kStartMarker); - CHECK(READ_FIELD(24) == v8::internal::HeapStats::kEndMarker); - - const int new_space_size = READ_FIELD(1); - const int new_space_capacity = READ_FIELD(2); - const int old_space_size = READ_FIELD(3); - const int old_space_capacity = READ_FIELD(4); - const int code_space_size = READ_FIELD(5); - const int code_space_capacity = READ_FIELD(6); - const int map_space_size = READ_FIELD(7); - const int map_space_capacity = READ_FIELD(8); - const int cell_space_size = READ_FIELD(9); - const int cell_space_capacity = READ_FIELD(10); - const int lo_space_size = READ_FIELD(11); - const int global_handle_count = READ_FIELD(12); - const int weak_global_handle_count = READ_FIELD(13); - const int pending_global_handle_count = READ_FIELD(14); - const int near_death_global_handle_count = READ_FIELD(15); - const int destroyed_global_handle_count = READ_FIELD(16); - const int memory_allocator_size = READ_FIELD(17); - const int memory_allocator_capacity = READ_FIELD(18); - const int os_error = READ_FIELD(19); -#undef READ_FIELD - - int objects_per_type[v8::internal::LAST_TYPE + 1] = {0}; - ReadArrayFrom(memory_region, heap_stats_addr, 21, - v8::internal::LAST_TYPE + 1, objects_per_type); - - int size_per_type[v8::internal::LAST_TYPE + 1] = {0}; - ReadArrayFrom(memory_region, heap_stats_addr, 22, v8::internal::LAST_TYPE + 1, - size_per_type); - - int js_global_objects = - objects_per_type[v8::internal::JS_GLOBAL_OBJECT_TYPE]; - int js_builtins_objects = - objects_per_type[v8::internal::JS_BUILTINS_OBJECT_TYPE]; - int js_global_proxies = - objects_per_type[v8::internal::JS_GLOBAL_PROXY_TYPE]; - - int indices[v8::internal::LAST_TYPE + 1]; - for (int i = 0; i <= v8::internal::LAST_TYPE; i++) { - indices[i] = i; - } - - std::stable_sort(indices, indices + sizeof(indices)/sizeof(indices[0]), - IndirectSorter(size_per_type)); - - int 
total_size = 0; - for (int i = 0; i <= v8::internal::LAST_TYPE; i++) { - total_size += size_per_type[i]; - } - - // Print heap stats. - - printf("exception thread ID: %" PRIu32 " (%#" PRIx32 ")\n", - exception_thread_id, exception_thread_id); - printf("heap stats address: %#" PRIx64 "\n", heap_stats_addr); -#define PRINT_INT_STAT(stat) \ - printf("\t%-25s\t% 10d\n", #stat ":", stat); -#define PRINT_MB_STAT(stat) \ - printf("\t%-25s\t% 10.3f MB\n", #stat ":", toM(stat)); - PRINT_MB_STAT(new_space_size); - PRINT_MB_STAT(new_space_capacity); - PRINT_MB_STAT(old_space_size); - PRINT_MB_STAT(old_space_capacity); - PRINT_MB_STAT(code_space_size); - PRINT_MB_STAT(code_space_capacity); - PRINT_MB_STAT(map_space_size); - PRINT_MB_STAT(map_space_capacity); - PRINT_MB_STAT(cell_space_size); - PRINT_MB_STAT(cell_space_capacity); - PRINT_MB_STAT(lo_space_size); - PRINT_INT_STAT(global_handle_count); - PRINT_INT_STAT(weak_global_handle_count); - PRINT_INT_STAT(pending_global_handle_count); - PRINT_INT_STAT(near_death_global_handle_count); - PRINT_INT_STAT(destroyed_global_handle_count); - PRINT_MB_STAT(memory_allocator_size); - PRINT_MB_STAT(memory_allocator_capacity); - PRINT_INT_STAT(os_error); -#undef PRINT_STAT - - printf("\n"); - - printf( - "\tJS_GLOBAL_OBJECT_TYPE/JS_BUILTINS_OBJECT_TYPE/JS_GLOBAL_PROXY_TYPE: " - "%d/%d/%d\n\n", - js_global_objects, js_builtins_objects, js_global_proxies); - - int running_size = 0; - for (int i = 0; i <= v8::internal::LAST_TYPE; i++) { - int type = indices[i]; - const char* name = InstanceTypeToString(type); - if (name == NULL) { - // Unknown instance type. Check that there is no objects of that type. - CHECK_EQ(0, objects_per_type[type]); - CHECK_EQ(0, size_per_type[type]); - continue; - } - int size = size_per_type[type]; - running_size += size; - printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n", - name, objects_per_type[type], toM(size), - 100. * size / total_size, 100. 
* running_size / total_size); - } - printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n", - "total", 0, toM(total_size), 100., 100.); -} - -} // namespace - -int main(int argc, char **argv) { - if (argc != 2) { - fprintf(stderr, "usage: %s \n", argv[0]); - return 1; - } - - DumpHeapStats(argv[1]); - - return 0; -} diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc index ad687c9efea128..43d25781659e5c 100644 --- a/deps/v8/tools/parser-shell.cc +++ b/deps/v8/tools/parser-shell.cc @@ -36,25 +36,16 @@ #include "include/libplatform/libplatform.h" #include "src/api.h" #include "src/compiler.h" -#include "src/parsing/scanner-character-streams.h" +#include "src/parsing/parse-info.h" #include "src/parsing/parser.h" #include "src/parsing/preparse-data-format.h" #include "src/parsing/preparse-data.h" #include "src/parsing/preparser.h" +#include "src/parsing/scanner-character-streams.h" #include "tools/shell-utils.h" using namespace v8::internal; -class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { - public: - virtual void* Allocate(size_t length) { - void* data = AllocateUninitialized(length); - return data == NULL ? 
data : memset(data, 0, length); - } - virtual void* AllocateUninitialized(size_t length) { return malloc(length); } - virtual void Free(void* data, size_t) { free(data); } -}; - class StringResource8 : public v8::String::ExternalOneByteStringResource { public: StringResource8(const char* data, int length) @@ -142,7 +133,7 @@ std::pair RunBaselineParser( int main(int argc, char* argv[]) { v8::V8::SetFlagsFromCommandLine(&argc, argv, true); - v8::V8::InitializeICU(); + v8::V8::InitializeICUDefaultLocation(argv[0]); v8::Platform* platform = v8::platform::CreateDefaultPlatform(); v8::V8::InitializePlatform(platform); v8::V8::Initialize(); @@ -168,9 +159,9 @@ int main(int argc, char* argv[]) { fnames.push_back(std::string(argv[i])); } } - ArrayBufferAllocator array_buffer_allocator; v8::Isolate::CreateParams create_params; - create_params.array_buffer_allocator = &array_buffer_allocator; + create_params.array_buffer_allocator = + v8::ArrayBuffer::Allocator::NewDefaultAllocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); { v8::Isolate::Scope isolate_scope(isolate); @@ -199,5 +190,6 @@ int main(int argc, char* argv[]) { v8::V8::Dispose(); v8::V8::ShutdownPlatform(); delete platform; + delete create_params.array_buffer_allocator; return 0; } diff --git a/deps/v8/tools/parser-shell.gyp b/deps/v8/tools/parser-shell.gyp index 77ed1eb246055f..4ef1a82d719572 100644 --- a/deps/v8/tools/parser-shell.gyp +++ b/deps/v8/tools/parser-shell.gyp @@ -30,14 +30,14 @@ 'v8_code': 1, 'v8_enable_i18n_support%': 1, }, - 'includes': ['../build/toolchain.gypi', '../build/features.gypi'], + 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'], 'targets': [ { 'target_name': 'parser-shell', 'type': 'executable', 'dependencies': [ - '../tools/gyp/v8.gyp:v8', - '../tools/gyp/v8.gyp:v8_libplatform', + '../src/v8.gyp:v8', + '../src/v8.gyp:v8_libplatform', ], 'conditions': [ ['v8_enable_i18n_support==1', { @@ -50,10 +50,6 @@ 'include_dirs+': [ '..', ], - 'defines': [ - 
# TODO(jochen): Remove again after this is globally turned on. - 'V8_IMMINENT_DEPRECATION_WARNINGS', - ], 'sources': [ 'parser-shell.cc', 'shell-utils.h', diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py index 63faeb1d6602e2..7ec9c50f218fe1 100755 --- a/deps/v8/tools/perf-to-html.py +++ b/deps/v8/tools/perf-to-html.py @@ -115,8 +115,8 @@ def __init__(self, name, data): self.name_ = name self.tests_ = {} for test in data: - # strip off "/" prefix - test_name = test.split("/")[1] + # strip off "/" prefix, allowing for subsequent "/"s + test_name = test.split("/", 1)[1] self.appendResult(test_name, data[test]) # tests is a dictionary of Results diff --git a/deps/v8/tools/plot-timer-events b/deps/v8/tools/plot-timer-events index 15f28ac22b5b84..da2e823c14df61 100755 --- a/deps/v8/tools/plot-timer-events +++ b/deps/v8/tools/plot-timer-events @@ -70,10 +70,9 @@ if test "$contains" -eq 0; then rm $calibration_log # Overhead in picoseconds. - options=--distortion= - options+=`echo "1000*(($t_1_end - $t_1_start) - ($t_2_end - $t_2_start)) \ + distortion=`echo "1000*(($t_1_end - $t_1_start) - ($t_2_end - $t_2_start)) \ / ($n_1 - $n_2)" | bc` - echo $options + options="--distortion=$distortion" fi cat $log_file | diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py index dd3533bcf40c91..bd541a2d8ad21e 100755 --- a/deps/v8/tools/presubmit.py +++ b/deps/v8/tools/presubmit.py @@ -55,12 +55,15 @@ # build/include_what_you_use: Started giving false positives for variables # named "string" and "map" assuming that you needed to include STL headers. # TODO(bmeurer): Fix and re-enable readability/check +# TODO(epertoso): Maybe re-enable readability/fn_size after +# http://crrev.com/2199323003 relands. 
LINT_RULES = """ -build/header_guard -build/include_what_you_use -build/namespaces -readability/check +-readability/fn_size +readability/streams -runtime/references """.split() @@ -296,9 +299,15 @@ def IgnoreDir(self, name): 'corrections.js', 'crypto.js', 'daemon.py', + 'debugger-script.js', 'earley-boyer.js', 'fannkuch.js', 'fasta.js', + 'injected-script.cc', + 'injected-script.h', + 'injected-script-source.js', + 'java-script-call-frame.cc', + 'java-script-call-frame.h', 'jsmin.py', 'libraries.cc', 'libraries-empty.cc', @@ -308,10 +317,19 @@ def IgnoreDir(self, name): 'primes.js', 'raytrace.js', 'regexp-pcre.js', + 'rjsmin.py', + 'script-breakpoint.h', 'sqlite.js', 'sqlite-change-heap.js', 'sqlite-pointer-masking.js', 'sqlite-safe-heap.js', + 'v8-debugger-script.h', + 'v8-function-call.cc', + 'v8-function-call.h', + 'v8-inspector-impl.cc', + 'v8-inspector-impl.h', + 'v8-runtime-agent-impl.cc', + 'v8-runtime-agent-impl.h', 'gnuplot-4.6.3-emscripten.js', 'zlib.js'] IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js'] diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py index b71cac5a10ae40..c1a99e8d11c52e 100755 --- a/deps/v8/tools/release/auto_roll.py +++ b/deps/v8/tools/release/auto_roll.py @@ -18,7 +18,9 @@ Please close rolling in case of a roll revert: https://v8-roll.appspot.com/ -This only works with a Google account.""") +This only works with a Google account. + +CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""") class Preparation(Step): MESSAGE = "Preparation." 
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py index cd730516855fa4..0fdffd93ac2756 100755 --- a/deps/v8/tools/release/check_clusterfuzz.py +++ b/deps/v8/tools/release/check_clusterfuzz.py @@ -87,6 +87,15 @@ }, "crash_state": ANY_RE, }, + { + "args": { + "job_type": "linux_asan_d8_ignition_v8_arm_dbg", + "reproducible": "True", + "open": "True", + "bug_information": "", + }, + "crash_state": ANY_RE, + }, { "args": { "job_type": "linux_asan_d8_v8_arm64_dbg", diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py index 7477ea1461f5eb..14d44b4bd60875 100755 --- a/deps/v8/tools/release/create_release.py +++ b/deps/v8/tools/release/create_release.py @@ -223,6 +223,27 @@ def SplitMapJoin(split_text, fun, join_text): os.remove(self.Config("CHANGELOG_ENTRY_FILE")) +class FixBrokenTag(Step): + MESSAGE = "Check for a missing tag and fix that instead." + + def RunStep(self): + commit = None + try: + commit = self.GitLog( + n=1, format="%H", + grep=self["commit_title"], + branch="origin/%s" % self["version"], + ) + except GitFailedException: + # In the normal case, the remote doesn't exist yet and git will fail. + pass + if commit: + print "Found %s. Trying to repair tag and bail out." % self["version"] + self.Git("tag %s %s" % (self["version"], commit)) + self.Git("push origin refs/tags/%s" % self["version"]) + return True + + class PushBranch(Step): MESSAGE = "Push changes." 
@@ -303,6 +324,7 @@ def _Steps(self): SetVersion, EnableMergeWatchlist, CommitBranch, + FixBrokenTag, PushBranch, TagRevision, CleanUp, diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py index 89fd7c9cf66382..b4c9de6d6cb338 100644 --- a/deps/v8/tools/release/git_recipes.py +++ b/deps/v8/tools/release/git_recipes.py @@ -242,6 +242,10 @@ def GitCLLand(self, **kwargs): self.Git( "cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs) + def GitCLAddComment(self, message, **kwargs): + args = ["cl", "comments", "-a", Quoted(message)] + self.Git(MakeArgs(args), **kwargs) + def GitDiff(self, loc1, loc2, **kwargs): return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs) diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py index 699fe1b3c66dbc..bdc94ebd09164a 100755 --- a/deps/v8/tools/release/merge_to_branch.py +++ b/deps/v8/tools/release/merge_to_branch.py @@ -47,10 +47,8 @@ def RunStep(self): open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close() self.InitialEnvironmentChecks(self.default_cwd) - if self._options.branch: - self["merge_to_branch"] = self._options.branch - else: # pragma: no cover - self.Die("Please specify a branch to merge to") + + self["merge_to_branch"] = self._options.branch self.CommonPrepare() self.PrepareBranch() @@ -74,7 +72,7 @@ def RunStep(self): for revision in self["full_revision_list"]: # Search for commits which matches the "Port XXX" pattern. git_hashes = self.GitLog(reverse=True, format="%H", - grep="Port %s" % revision, + grep="^[Pp]ort %s" % revision, branch=self.vc.RemoteMasterBranch()) for git_hash in git_hashes.splitlines(): revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash) @@ -99,6 +97,12 @@ def RunStep(self): class CreateCommitMessage(Step): MESSAGE = "Create commit message." 
+ def _create_commit_description(self, commit_hash): + patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash) + description = "Merged: " + patch_merge_desc + "\n" + description += "Revision: " + commit_hash + "\n\n" + return description + def RunStep(self): # Stringify: ["abcde", "12345"] -> "abcde, 12345" @@ -107,17 +111,23 @@ def RunStep(self): if not self["revision_list"]: # pragma: no cover self.Die("Revision list is empty.") - action_text = "Merged %s" + msg_pieces = [] - # The commit message title is added below after the version is specified. - msg_pieces = [ - "\n".join(action_text % s for s in self["full_revision_list"]), - ] - msg_pieces.append("\n\n") + if len(self["full_revision_list"]) > 1: + self["commit_title"] = "Merged: Squashed multiple commits." + for commit_hash in self["full_revision_list"]: + msg_pieces.append(self._create_commit_description(commit_hash)) + else: + commit_hash = self["full_revision_list"][0] + full_description = self._create_commit_description(commit_hash).split("\n") - for commit_hash in self["full_revision_list"]: - patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash) - msg_pieces.append("%s\n\n" % patch_merge_desc) + #Truncate title because of code review tool + title = full_description[0] + if len(title) > 100: + title = title[:96] + " ..." + + self["commit_title"] = title + msg_pieces.append(full_description[1] + "\n\n") bugs = [] for commit_hash in self["full_revision_list"]: @@ -128,6 +138,8 @@ def RunStep(self): if bug_aggregate: msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate) + msg_pieces.append("NOTRY=true\nNOPRESUBMIT=true\nNOTREECHECKS=true\n") + self["new_commit_msg"] = "".join(msg_pieces) @@ -144,49 +156,26 @@ def RunStep(self): if self._options.patch: self.ApplyPatch(self._options.patch) - -class PrepareVersion(Step): - MESSAGE = "Prepare version file." - - def RunStep(self): - # This is used to calculate the patch level increment. 
- self.ReadAndPersistVersion() - - -class IncrementVersion(Step): - MESSAGE = "Increment version number." - - def RunStep(self): - new_patch = str(int(self["patch"]) + 1) - if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will " - "fire up your EDITOR on %s so you can make arbitrary " - "changes. When you're done, save the file and exit your " - "EDITOR.)" % VERSION_FILE): - text = FileToText(os.path.join(self.default_cwd, VERSION_FILE)) - text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P\s+)\d*$", - r"\g%s" % new_patch, - text) - TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE)) - else: - self.Editor(os.path.join(self.default_cwd, VERSION_FILE)) - self.ReadAndPersistVersion("new_") - self["version"] = "%s.%s.%s.%s" % (self["new_major"], - self["new_minor"], - self["new_build"], - self["new_patch"]) - - class CommitLocal(Step): MESSAGE = "Commit to local branch." def RunStep(self): # Add a commit message title. - self["commit_title"] = "Version %s (cherry-pick)" % self["version"] self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"], self["new_commit_msg"]) TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE")) self.GitCommit(file_name=self.Config("COMMITMSG_FILE")) +class AddInformationalComment(Step): + MESSAGE = 'Show additional information.' + + def RunStep(self): + message = ("NOTE: This script will no longer automatically " + "update include/v8-version.h " + "and create a tag. This is done automatically by the autotag bot. " + "Please call the merge_to_branch.py with --help for more information.") + + self.GitCLAddComment(message) class CommitRepository(Step): MESSAGE = "Commit to the repository." @@ -197,24 +186,12 @@ def RunStep(self): self.GitPresubmit() self.vc.CLLand() - -class TagRevision(Step): - MESSAGE = "Create the tag." 
- - def RunStep(self): - print "Creating tag %s" % self["version"] - self.vc.Tag(self["version"], - self.vc.RemoteBranch(self["merge_to_branch"]), - self["commit_title"]) - - class CleanUp(Step): MESSAGE = "Cleanup." def RunStep(self): self.CommonCleanup() print "*** SUMMARY ***" - print "version: %s" % self["version"] print "branch: %s" % self["merge_to_branch"] if self["revision_list"]: print "patches: %s" % self["revision_list"] @@ -223,7 +200,9 @@ def RunStep(self): class MergeToBranch(ScriptsBase): def _Description(self): return ("Performs the necessary steps to merge revisions from " - "master to other branches, including candidates.") + "master to release branches like 4.5. This script does not " + "version the commit. See http://goo.gl/9ke2Vw for more " + "information.") def _PrepareOptions(self, parser): group = parser.add_mutually_exclusive_group(required=True) @@ -250,6 +229,11 @@ def _ProcessOptions(self, options): # CC ulan to make sure that fixes are merged to Google3. options.cc = "ulan@chromium.org" + if len(options.branch.split('.')) > 2: + print ("This script does not support merging to roll branches. " + "Please use tools/release/roll_merge.py for this use case.") + return False + # Make sure to use git hashes in the new workflows. for revision in options.revisions: if (IsSvnNumber(revision) or @@ -276,12 +260,10 @@ def _Steps(self): SearchArchitecturePorts, CreateCommitMessage, ApplyPatches, - PrepareVersion, - IncrementVersion, CommitLocal, UploadStep, + AddInformationalComment, CommitRepository, - TagRevision, CleanUp, ] diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py new file mode 100755 index 00000000000000..2dd43eae3ac25b --- /dev/null +++ b/deps/v8/tools/release/roll_merge.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. 
+# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import argparse +from collections import OrderedDict +import sys + +from common_includes import * + +def IsSvnNumber(rev): + return rev.isdigit() and len(rev) < 8 + +class Preparation(Step): + MESSAGE = "Preparation." 
+ + def RunStep(self): + if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")): + if self._options.force: + os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE")) + elif self._options.step == 0: # pragma: no cover + self.Die("A merge is already in progress") + open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close() + + self.InitialEnvironmentChecks(self.default_cwd) + if self._options.branch: + self["merge_to_branch"] = self._options.branch + else: # pragma: no cover + self.Die("Please specify a branch to merge to") + + self.CommonPrepare() + self.PrepareBranch() + + +class CreateBranch(Step): + MESSAGE = "Create a fresh branch for the patch." + + def RunStep(self): + self.GitCreateBranch(self.Config("BRANCHNAME"), + self.vc.RemoteBranch(self["merge_to_branch"])) + + +class SearchArchitecturePorts(Step): + MESSAGE = "Search for corresponding architecture ports." + + def RunStep(self): + self["full_revision_list"] = list(OrderedDict.fromkeys( + self._options.revisions)) + port_revision_list = [] + for revision in self["full_revision_list"]: + # Search for commits which matches the "Port XXX" pattern. + git_hashes = self.GitLog(reverse=True, format="%H", + grep="Port %s" % revision, + branch=self.vc.RemoteMasterBranch()) + for git_hash in git_hashes.splitlines(): + revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash) + + # Is this revision included in the original revision list? + if git_hash in self["full_revision_list"]: + print("Found port of %s -> %s (already included): %s" + % (revision, git_hash, revision_title)) + else: + print("Found port of %s -> %s: %s" + % (revision, git_hash, revision_title)) + port_revision_list.append(git_hash) + + # Do we find any port? + if len(port_revision_list) > 0: + if self.Confirm("Automatically add corresponding ports (%s)?" + % ", ".join(port_revision_list)): + #: 'y': Add ports to revision list. 
+ self["full_revision_list"].extend(port_revision_list) + + +class CreateCommitMessage(Step): + MESSAGE = "Create commit message." + + def RunStep(self): + + # Stringify: ["abcde", "12345"] -> "abcde, 12345" + self["revision_list"] = ", ".join(self["full_revision_list"]) + + if not self["revision_list"]: # pragma: no cover + self.Die("Revision list is empty.") + + action_text = "Merged %s" + + # The commit message title is added below after the version is specified. + msg_pieces = [ + "\n".join(action_text % s for s in self["full_revision_list"]), + ] + msg_pieces.append("\n\n") + + for commit_hash in self["full_revision_list"]: + patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash) + msg_pieces.append("%s\n\n" % patch_merge_desc) + + bugs = [] + for commit_hash in self["full_revision_list"]: + msg = self.GitLog(n=1, git_hash=commit_hash) + for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M): + bugs.extend(s.strip() for s in bug.split(",")) + bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs))) + if bug_aggregate: + msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate) + + self["new_commit_msg"] = "".join(msg_pieces) + + +class ApplyPatches(Step): + MESSAGE = "Apply patches for selected revisions." + + def RunStep(self): + for commit_hash in self["full_revision_list"]: + print("Applying patch for %s to %s..." + % (commit_hash, self["merge_to_branch"])) + patch = self.GitGetPatch(commit_hash) + TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE")) + self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE")) + if self._options.patch: + self.ApplyPatch(self._options.patch) + + +class PrepareVersion(Step): + MESSAGE = "Prepare version file." + + def RunStep(self): + # This is used to calculate the patch level increment. + self.ReadAndPersistVersion() + + +class IncrementVersion(Step): + MESSAGE = "Increment version number." 
+ + def RunStep(self): + new_patch = str(int(self["patch"]) + 1) + if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will " + "fire up your EDITOR on %s so you can make arbitrary " + "changes. When you're done, save the file and exit your " + "EDITOR.)" % VERSION_FILE): + text = FileToText(os.path.join(self.default_cwd, VERSION_FILE)) + text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P\s+)\d*$", + r"\g%s" % new_patch, + text) + TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE)) + else: + self.Editor(os.path.join(self.default_cwd, VERSION_FILE)) + self.ReadAndPersistVersion("new_") + self["version"] = "%s.%s.%s.%s" % (self["new_major"], + self["new_minor"], + self["new_build"], + self["new_patch"]) + + +class CommitLocal(Step): + MESSAGE = "Commit to local branch." + + def RunStep(self): + # Add a commit message title. + self["commit_title"] = "Version %s (cherry-pick)" % self["version"] + self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"], + self["new_commit_msg"]) + TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE")) + self.GitCommit(file_name=self.Config("COMMITMSG_FILE")) + + +class CommitRepository(Step): + MESSAGE = "Commit to the repository." + + def RunStep(self): + self.GitCheckout(self.Config("BRANCHNAME")) + self.WaitForLGTM() + self.GitPresubmit() + self.vc.CLLand() + + +class TagRevision(Step): + MESSAGE = "Create the tag." + + def RunStep(self): + print "Creating tag %s" % self["version"] + self.vc.Tag(self["version"], + self.vc.RemoteBranch(self["merge_to_branch"]), + self["commit_title"]) + + +class CleanUp(Step): + MESSAGE = "Cleanup." 
+ + def RunStep(self): + self.CommonCleanup() + print "*** SUMMARY ***" + print "version: %s" % self["version"] + print "branch: %s" % self["merge_to_branch"] + if self["revision_list"]: + print "patches: %s" % self["revision_list"] + + +class RollMerge(ScriptsBase): + def _Description(self): + return ("Performs the necessary steps to merge revisions from " + "master to other branches, including candidates and roll branches.") + + def _PrepareOptions(self, parser): + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument("--branch", help="The branch to merge to.") + parser.add_argument("revisions", nargs="*", + help="The revisions to merge.") + parser.add_argument("-f", "--force", + help="Delete sentinel file.", + default=False, action="store_true") + parser.add_argument("-m", "--message", + help="A commit message for the patch.") + parser.add_argument("-p", "--patch", + help="A patch file to apply as part of the merge.") + + def _ProcessOptions(self, options): + if len(options.revisions) < 1: + if not options.patch: + print "Either a patch file or revision numbers must be specified" + return False + if not options.message: + print "You must specify a merge comment if no patches are specified" + return False + options.bypass_upload_hooks = True + # CC ulan to make sure that fixes are merged to Google3. + options.cc = "ulan@chromium.org" + + # Make sure to use git hashes in the new workflows. + for revision in options.revisions: + if (IsSvnNumber(revision) or + (revision[0:1] == "r" and IsSvnNumber(revision[1:]))): + print "Please provide full git hashes of the patches to merge." 
+ print "Got: %s" % revision + return False + return True + + def _Config(self): + return { + "BRANCHNAME": "prepare-merge", + "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile", + "ALREADY_MERGING_SENTINEL_FILE": + "/tmp/v8-merge-to-branch-tempfile-already-merging", + "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch", + "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg", + } + + def _Steps(self): + return [ + Preparation, + CreateBranch, + SearchArchitecturePorts, + CreateCommitMessage, + ApplyPatches, + PrepareVersion, + IncrementVersion, + CommitLocal, + UploadStep, + CommitRepository, + TagRevision, + CleanUp, + ] + + +if __name__ == "__main__": # pragma: no cover + sys.exit(RollMerge().Run()) diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py index 05457c9285c172..ab92e89f3a05ca 100644 --- a/deps/v8/tools/release/test_scripts.py +++ b/deps/v8/tools/release/test_scripts.py @@ -40,13 +40,14 @@ import create_release from create_release import CreateRelease import merge_to_branch -from merge_to_branch import * +from merge_to_branch import MergeToBranch import push_to_candidates from push_to_candidates import * import releases from releases import Releases from auto_tag import AutoTag - +import roll_merge +from roll_merge import RollMerge TEST_CONFIG = { "DEFAULT_CWD": None, @@ -528,7 +529,7 @@ def testTagTimeout(self): self._state["version"] = "tag_name" self._state["commit_title"] = "Title" self.assertRaises(Exception, - lambda: self.RunStep(MergeToBranch, TagRevision, args)) + lambda: self.RunStep(RollMerge, TagRevision, args)) def testReadAndPersistVersion(self): self.WriteFakeVersionFile(build=5) @@ -970,6 +971,8 @@ def CheckVersionCommit(): cb=self.WriteFakeWatchlistsFile), Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "", cb=CheckVersionCommit), + Cmd("git log -1 --format=%H --grep=\"Version 3.22.5\" origin/3.22.5", + ""), Cmd("git push origin " 
"refs/heads/work-branch:refs/pending/heads/3.22.5 " "push_hash:refs/pending-tags/heads/3.22.5 " @@ -1041,6 +1044,8 @@ def CheckVersionCommit(): https://v8-roll.appspot.com/ This only works with a Google account. +CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel + TBR=reviewer@chromium.org""" # Snippet from the original DEPS file. @@ -1171,7 +1176,7 @@ def testAutoPush(self): self.assertEquals("abc123", state["candidate"]) - def testMergeToBranch(self): + def testRollMerge(self): TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile() TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git")) self.WriteFakeVersionFile(build=5) @@ -1295,6 +1300,377 @@ def VerifyLand(): Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""), ]) + # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the + # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567. + args = ["-f", "-p", extra_patch, "--branch", "candidates", + "ab12345", "ab23456", "ab34567"] + + # The first run of the script stops because of git being down. + self.assertRaises(GitFailedException, + lambda: RollMerge(TEST_CONFIG, self).Run(args)) + + # Test that state recovery after restarting the script works. + args += ["-s", "4"] + RollMerge(TEST_CONFIG, self).Run(args) + + def testReleases(self): + c_hash1_commit_log = """Update V8 to Version 4.2.71. + +Cr-Commit-Position: refs/heads/master@{#5678} +""" + c_hash2_commit_log = """Revert something. + +BUG=12345 + +Reason: +> Some reason. 
+> Cr-Commit-Position: refs/heads/master@{#12345} +> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4 + +Review URL: https://codereview.chromium.org/12345 + +Cr-Commit-Position: refs/heads/master@{#4567} +git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b + +""" + c_hash3_commit_log = """Simple. + +git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b + +""" + c_hash_234_commit_log = """Version 3.3.1.1 (cherry-pick). + +Merged abc12. + +Review URL: fake.com + +Cr-Commit-Position: refs/heads/candidates@{#234} +""" + c_hash_123_commit_log = """Version 3.3.1.0 + +git-svn-id: googlecode@123 0039-1c4b +""" + c_hash_345_commit_log = """Version 3.4.0. + +Cr-Commit-Position: refs/heads/candidates@{#345} +""" + c_hash_456_commit_log = """Version 4.2.71. + +Cr-Commit-Position: refs/heads/4.2.71@{#1} +""" + c_deps = "Line\n \"v8_revision\": \"%s\",\n line\n" + + json_output = self.MakeEmptyTempFile() + csv_output = self.MakeEmptyTempFile() + self.WriteFakeVersionFile() + + TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory() + chrome_dir = TEST_CONFIG["CHROMIUM"] + chrome_v8_dir = os.path.join(chrome_dir, "v8") + os.makedirs(chrome_v8_dir) + + def ResetVersion(major, minor, build, patch=0): + return lambda: self.WriteFakeVersionFile(major=major, + minor=minor, + build=build, + patch=patch) + + self.Expect([ + Cmd("git status -s -uno", ""), + Cmd("git checkout -f origin/master", ""), + Cmd("git fetch", ""), + Cmd("git branch", " branch1\n* branch2\n"), + Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""), + Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""), + Cmd("git rev-list --max-age=395200 --tags", + "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"), + Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"), + Cmd("git describe --tags hash_234", "3.3.1.1"), + Cmd("git describe --tags hash_123", "3.21.2"), + Cmd("git describe --tags hash_345", "3.22.3"), + Cmd("git describe --tags hash_456", "4.2.71"), + Cmd("git 
diff --name-only hash_234 hash_234^", VERSION_FILE), + Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 3, 1, 1)), + Cmd("git branch -r --contains hash_234", " branch-heads/3.3\n"), + Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log), + Cmd("git log -1 --format=%s hash_234", ""), + Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log), + Cmd("git log -1 --format=%ci hash_234", "18:15"), + Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 22, 5)), + Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE), + Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 21, 2)), + Cmd("git branch -r --contains hash_123", " branch-heads/3.21\n"), + Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log), + Cmd("git log -1 --format=%s hash_123", ""), + Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log), + Cmd("git log -1 --format=%ci hash_123", "03:15"), + Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 22, 5)), + Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE), + Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 22, 3)), + Cmd("git branch -r --contains hash_345", " origin/candidates\n"), + Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log), + Cmd("git log -1 --format=%s hash_345", ""), + Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log), + Cmd("git log -1 --format=%ci hash_345", ""), + Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 22, 5)), + Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE), + Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "", + cb=ResetVersion(4, 2, 71)), + Cmd("git branch -r --contains hash_456", " origin/4.2.71\n"), + Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log), + Cmd("git log -1 --format=%H 4.2.71", "hash_456"), + Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"), + Cmd("git log -1 
--format=%H hash_456^", "master_456"), + Cmd("git log -1 --format=%B master_456", + "Cr-Commit-Position: refs/heads/master@{#456}"), + Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log), + Cmd("git log -1 --format=%ci hash_456", "02:15"), + Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "", + cb=ResetVersion(3, 22, 5)), + Cmd("git fetch origin +refs/heads/*:refs/remotes/origin/* " + "+refs/branch-heads/*:refs/remotes/branch-heads/*", "", + cwd=chrome_dir), + Cmd("git fetch origin", "", cwd=chrome_v8_dir), + Cmd("git log --format=%H --grep=\"V8\" origin/master -- DEPS", + "c_hash1\nc_hash2\nc_hash3\n", + cwd=chrome_dir), + Cmd("git show c_hash1:DEPS", c_deps % "hash_456", cwd=chrome_dir), + Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log, + cwd=chrome_dir), + Cmd("git show c_hash2:DEPS", c_deps % "hash_345", cwd=chrome_dir), + Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log, + cwd=chrome_dir), + Cmd("git show c_hash3:DEPS", c_deps % "deadbeef", cwd=chrome_dir), + Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log, + cwd=chrome_dir), + Cmd("git branch -r", " weird/123\n branch-heads/7\n", cwd=chrome_dir), + Cmd("git show refs/branch-heads/7:DEPS", c_deps % "hash_345", + cwd=chrome_dir), + URL("http://omahaproxy.appspot.com/all.json", """[{ + "os": "win", + "versions": [{ + "version": "2.2.2.2", + "v8_version": "22.2.2.2", + "current_reldate": "04/09/15", + "os": "win", + "channel": "canary", + "previous_version": "1.1.1.0" + }] + }]"""), + URL("http://omahaproxy.appspot.com/v8.json?version=1.1.1.0", """{ + "chromium_version": "1.1.1.0", + "v8_version": "11.1.1.0" + }"""), + Cmd("git rev-list -1 11.1.1", "v8_previous_version_hash"), + Cmd("git rev-list -1 22.2.2.2", "v8_version_hash"), + Cmd("git checkout -f origin/master", ""), + Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "") + ]) + + args = ["-c", TEST_CONFIG["CHROMIUM"], + "--json", json_output, + "--csv", csv_output, + "--max-releases", "1"] + Releases(TEST_CONFIG, 
self).Run(args) + + # Check expected output. + csv = ("4.2.71,4.2.71,1,5678,\r\n" + "3.22.3,candidates,345,4567:5677,\r\n" + "3.21.2,3.21,123,,\r\n" + "3.3.1.1,3.3,234,,abc12\r\n") + self.assertEquals(csv, FileToText(csv_output)) + + expected_json = {"chrome_releases":{ + "canaries": [ + { + "chrome_version": "2.2.2.2", + "os": "win", + "release_date": "04/09/15", + "v8_version": "22.2.2.2", + "v8_version_hash": "v8_version_hash", + "v8_previous_version": "11.1.1.0", + "v8_previous_version_hash": "v8_previous_version_hash" + }]}, + "releases":[ + { + "revision": "1", + "revision_git": "hash_456", + "master_position": "456", + "master_hash": "master_456", + "patches_merged": "", + "version": "4.2.71", + "chromium_revision": "5678", + "branch": "4.2.71", + "review_link": "", + "date": "02:15", + "chromium_branch": "", + # FIXME(machenbach): Fix revisions link for git. + "revision_link": "https://code.google.com/p/v8/source/detail?r=1", + }, + { + "revision": "345", + "revision_git": "hash_345", + "master_position": "", + "master_hash": "", + "patches_merged": "", + "version": "3.22.3", + "chromium_revision": "4567:5677", + "branch": "candidates", + "review_link": "", + "date": "", + "chromium_branch": "7", + "revision_link": "https://code.google.com/p/v8/source/detail?r=345", + }, + { + "revision": "123", + "revision_git": "hash_123", + "patches_merged": "", + "master_position": "", + "master_hash": "", + "version": "3.21.2", + "chromium_revision": "", + "branch": "3.21", + "review_link": "", + "date": "03:15", + "chromium_branch": "", + "revision_link": "https://code.google.com/p/v8/source/detail?r=123", + }, + { + "revision": "234", + "revision_git": "hash_234", + "patches_merged": "abc12", + "master_position": "", + "master_hash": "", + "version": "3.3.1.1", + "chromium_revision": "", + "branch": "3.3", + "review_link": "fake.com", + "date": "18:15", + "chromium_branch": "", + "revision_link": "https://code.google.com/p/v8/source/detail?r=234", + },], + } + 
self.assertEquals(expected_json, json.loads(FileToText(json_output))) + + def testMergeToBranch(self): + TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile() + TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git")) + self.WriteFakeVersionFile(build=5) + os.environ["EDITOR"] = "vi" + extra_patch = self.MakeEmptyTempFile() + + + def VerifyPatch(patch): + return lambda: self.assertEquals(patch, + FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"])) + + info_msg = ("NOTE: This script will no longer automatically " + "update include/v8-version.h " + "and create a tag. This is done automatically by the autotag bot. " + "Please call the merge_to_branch.py with --help for more information.") + + msg = """Merged: Squashed multiple commits. + +Merged: Title4 +Revision: ab12345 + +Merged: Title2 +Revision: ab23456 + +Merged: Title3 +Revision: ab34567 + +Merged: Title1 +Revision: ab45678 + +Merged: Revert \"Something\" +Revision: ab56789 + +BUG=123,234,345,456,567,v8:123 +LOG=N +NOTRY=true +NOPRESUBMIT=true +NOTREECHECKS=true +""" + + def VerifyLand(): + commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"]) + self.assertEquals(msg, commit) + + self.Expect([ + Cmd("git status -s -uno", ""), + Cmd("git checkout -f origin/master", ""), + Cmd("git fetch", ""), + Cmd("git branch", " branch1\n* branch2\n"), + Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" % + TEST_CONFIG["BRANCHNAME"], ""), + Cmd(("git log --format=%H --grep=\"^[Pp]ort ab12345\" " + "--reverse origin/master"), + "ab45678\nab23456"), + Cmd("git log -1 --format=%s ab45678", "Title1"), + Cmd("git log -1 --format=%s ab23456", "Title2"), + Cmd(("git log --format=%H --grep=\"^[Pp]ort ab23456\" " + "--reverse origin/master"), + ""), + Cmd(("git log --format=%H --grep=\"^[Pp]ort ab34567\" " + "--reverse origin/master"), + "ab56789"), + Cmd("git log -1 --format=%s ab56789", "Title3"), + RL("Y"), # Automatically add corresponding ports (ab34567, ab56789)? 
+ # Simulate git being down which stops the script. + Cmd("git log -1 --format=%s ab12345", None), + # Restart script in the failing step. + Cmd("git log -1 --format=%s ab12345", "Title4"), + Cmd("git log -1 --format=%s ab23456", "Title2"), + Cmd("git log -1 --format=%s ab34567", "Title3"), + Cmd("git log -1 --format=%s ab45678", "Title1"), + Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""), + Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"), + Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"), + Cmd("git log -1 ab34567", "Title3\nLOG=n\nBUG=567, 456"), + Cmd("git log -1 ab45678", "Title1\nBUG="), + Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"), + Cmd("git log -1 -p ab12345", "patch4"), + Cmd(("git apply --index --reject \"%s\"" % + TEST_CONFIG["TEMPORARY_PATCH_FILE"]), + "", cb=VerifyPatch("patch4")), + Cmd("git log -1 -p ab23456", "patch2"), + Cmd(("git apply --index --reject \"%s\"" % + TEST_CONFIG["TEMPORARY_PATCH_FILE"]), + "", cb=VerifyPatch("patch2")), + Cmd("git log -1 -p ab34567", "patch3"), + Cmd(("git apply --index --reject \"%s\"" % + TEST_CONFIG["TEMPORARY_PATCH_FILE"]), + "", cb=VerifyPatch("patch3")), + Cmd("git log -1 -p ab45678", "patch1"), + Cmd(("git apply --index --reject \"%s\"" % + TEST_CONFIG["TEMPORARY_PATCH_FILE"]), + "", cb=VerifyPatch("patch1")), + Cmd("git log -1 -p ab56789", "patch5\n"), + Cmd(("git apply --index --reject \"%s\"" % + TEST_CONFIG["TEMPORARY_PATCH_FILE"]), + "", cb=VerifyPatch("patch5\n")), + Cmd("git apply --index --reject \"%s\"" % extra_patch, ""), + Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""), + RL("reviewer@chromium.org"), # V8 reviewer. + Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" " + "--bypass-hooks --cc \"ulan@chromium.org\"", ""), + Cmd("git cl comments -a \"%s\"" % info_msg, ""), + Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""), + RL("LGTM"), # Enter LGTM for V8 CL. 
+ Cmd("git cl presubmit", "Presubmit successfull\n"), + Cmd("git cl land -f --bypass-hooks", "Closing issue\n", + cb=VerifyLand), + Cmd("git checkout -f origin/master", ""), + Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""), + ]) + # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567. args = ["-f", "-p", extra_patch, "--branch", "candidates", @@ -1554,6 +1930,8 @@ def ResetVersion(major, minor, build, patch=0): self.assertEquals(expected_json, json.loads(FileToText(json_output))) + + class SystemTest(unittest.TestCase): def testReload(self): options = ScriptsBase( diff --git a/deps/v8/tools/run-deopt-fuzzer.gyp b/deps/v8/tools/run-deopt-fuzzer.gyp index 73f0aaf7a521cd..9eb6b538bc2343 100644 --- a/deps/v8/tools/run-deopt-fuzzer.gyp +++ b/deps/v8/tools/run-deopt-fuzzer.gyp @@ -13,8 +13,8 @@ '../src/d8.gyp:d8_run', ], 'includes': [ - '../build/features.gypi', - '../build/isolate.gypi', + '../gypfiles/features.gypi', + '../gypfiles/isolate.gypi', ], 'sources': [ 'run-deopt-fuzzer.isolate', diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py index 970aa8e616e52f..b143430d27e5a6 100755 --- a/deps/v8/tools/run-deopt-fuzzer.py +++ b/deps/v8/tools/run-deopt-fuzzer.py @@ -74,16 +74,12 @@ "s390", "s390x", "mipsel", - "nacl_ia32", - "nacl_x64", "x64"] # Double the timeout for these: SLOW_ARCHS = ["android_arm", "android_ia32", "arm", - "mipsel", - "nacl_ia32", - "nacl_x64"] + "mipsel"] MAX_DEOPT = 1000000000 DISTRIBUTION_MODES = ["smooth", "random"] @@ -398,7 +394,6 @@ def Execute(arch, mode, args, options, suites, workspace): "deopt_fuzzer": True, "gc_stress": False, "gcov_coverage": False, - "ignition": False, "isolates": options.isolates, "mode": mode, "no_i18n": False, diff --git a/deps/v8/tools/run-perf.sh b/deps/v8/tools/run-perf.sh index 24053b40fb0aa3..03123fdbb84df8 100755 --- a/deps/v8/tools/run-perf.sh +++ b/deps/v8/tools/run-perf.sh @@ -13,6 
+13,7 @@ SAMPLE_EVERY_N_CYCLES=10000 SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate" KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict" CALL_GRAPH_METHOD="fp" # dwarf does not play nice with JITted objects. +EVENT_TYPE=${EVENT_TYPE:=cycles:u} ########## Usage @@ -46,7 +47,7 @@ fi echo "Running..." perf record -R \ - -e cycles:u \ + -e $EVENT_TYPE \ -c $SAMPLE_EVERY_N_CYCLES \ --call-graph $CALL_GRAPH_METHOD \ -i $@ --perf_basic_prof diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py index a380c97ad3951d..f248dff5cc3eb4 100755 --- a/deps/v8/tools/run-tests.py +++ b/deps/v8/tools/run-tests.py @@ -30,10 +30,11 @@ from collections import OrderedDict import itertools +import json import multiprocessing import optparse import os -from os.path import join +from os.path import getmtime, isdir, join import platform import random import shlex @@ -44,7 +45,7 @@ from testrunner.local import execution from testrunner.local import progress from testrunner.local import testsuite -from testrunner.local.testsuite import ALL_VARIANTS +from testrunner.local.variants import ALL_VARIANTS from testrunner.local import utils from testrunner.local import verbose from testrunner.network import network_execution @@ -54,6 +55,8 @@ # Base dir of the v8 checkout to be used as cwd. BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +DEFAULT_OUT_GN = "out.gn" + ARCH_GUESS = utils.DefaultArch() # Map of test name synonyms to lists of test suites. Should be ordered by @@ -81,13 +84,6 @@ "intl", "unittests", ], - # This needs to stay in sync with test/ignition.isolate. - "ignition": [ - "mjsunit", - "cctest", - "webkit", - "message", - ], # This needs to stay in sync with test/optimize_for_size.isolate. 
"optimize_for_size": [ "mjsunit", @@ -102,13 +98,26 @@ TIMEOUT_DEFAULT = 60 -VARIANTS = ["default", "stress", "turbofan"] +VARIANTS = ["default", "turbofan", "ignition_staging"] -EXHAUSTIVE_VARIANTS = VARIANTS + [ - "nocrankshaft", +MORE_VARIANTS = [ + "ignition", + "stress", "turbofan_opt", + "asm_wasm", ] +EXHAUSTIVE_VARIANTS = VARIANTS + MORE_VARIANTS + +VARIANT_ALIASES = { + # The default for developer workstations. + "dev": VARIANTS, + # Additional variants, run on all bots. + "more": MORE_VARIANTS, + # Additional variants, run on a subset of bots. + "extra": ["nocrankshaft"], +} + DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination", "--nofold-constants", "--enable-slow-asserts", "--debug-code", "--verify-heap"] @@ -173,8 +182,6 @@ "mipsel", "mips64", "mips64el", - "nacl_ia32", - "nacl_x64", "s390", "s390x", "ppc", @@ -192,8 +199,8 @@ "mipsel", "mips64", "mips64el", - "nacl_ia32", - "nacl_x64", + "s390", + "s390x", "x87", "arm64"] @@ -246,13 +253,11 @@ def BuildOptions(): result.add_option("--download-data", help="Download missing test suite data", default=False, action="store_true") result.add_option("--download-data-only", - help="Download missing test suite data and exit", + help="Deprecated", default=False, action="store_true") result.add_option("--extra-flags", help="Additional flags to pass to each test command", default="") - result.add_option("--ignition", help="Skip tests which don't run in ignition", - default=False, action="store_true") result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true") result.add_option("-j", help="The number of parallel tasks to run", @@ -272,7 +277,7 @@ def BuildOptions(): default=(utils.GuessOS() != "linux"), dest="no_network", action="store_true") result.add_option("--no-presubmit", "--nopresubmit", - help='Skip presubmit checks', + help='Skip presubmit checks (deprecated)', default=False, dest="no_presubmit", action="store_true") result.add_option("--no-snap", 
"--nosnap", help='Test a build compiled without snapshot.', @@ -280,19 +285,20 @@ def BuildOptions(): result.add_option("--no-sorting", "--nosorting", help="Don't sort tests according to duration of last run.", default=False, dest="no_sorting", action="store_true") - result.add_option("--no-stress", "--nostress", - help="Don't run crankshaft --always-opt --stress-op test", - default=False, dest="no_stress", action="store_true") result.add_option("--no-variants", "--novariants", help="Don't run any testing variants", default=False, dest="no_variants", action="store_true") result.add_option("--variants", - help="Comma-separated list of testing variants: %s" % VARIANTS) + help="Comma-separated list of testing variants;" + " default: \"%s\"" % ",".join(VARIANTS)) result.add_option("--exhaustive-variants", default=False, action="store_true", - help="Use exhaustive set of default variants.") + help="Use exhaustive set of default variants:" + " \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS)) result.add_option("--outdir", help="Base directory with compile output", default="out") + result.add_option("--gn", help="Scan out.gn for the last built configuration", + default=False, action="store_true") result.add_option("--predictable", help="Compare output of several reruns of each test", default=False, action="store_true") @@ -326,16 +332,13 @@ def BuildOptions(): help="Don't skip more slow tests when using a simulator.", default=False, action="store_true", dest="dont_skip_simulator_slow_tests") - result.add_option("--stress-only", - help="Only run tests with --always-opt --stress-opt", - default=False, action="store_true") result.add_option("--swarming", help="Indicates running test driver on swarming.", default=False, action="store_true") result.add_option("--time", help="Print timing information after running", default=False, action="store_true") result.add_option("-t", "--timeout", help="Timeout in seconds", - default= -1, type="int") + default=TIMEOUT_DEFAULT, type="int") 
result.add_option("--tsan", help="Regard test expectations for TSAN", default=False, action="store_true") @@ -378,6 +381,10 @@ def BuildbotToV8Mode(config): def SetupEnvironment(options): """Setup additional environment variables.""" + + # Many tests assume an English interface. + os.environ['LANG'] = 'en_US.UTF-8' + symbolizer = 'external_symbolizer_path=%s' % ( os.path.join( BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin', @@ -420,10 +427,51 @@ def SetupEnvironment(options): ]) def ProcessOptions(options): - global ALL_VARIANTS - global EXHAUSTIVE_VARIANTS global VARIANTS + # First try to auto-detect configurations based on the build if GN was + # used. This can't be overridden by cmd-line arguments. + options.auto_detect = False + if options.gn: + gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN) + latest_timestamp = -1 + latest_config = None + for gn_config in os.listdir(gn_out_dir): + gn_config_dir = os.path.join(gn_out_dir, gn_config) + if not isdir(gn_config_dir): + continue + if os.path.getmtime(gn_config_dir) > latest_timestamp: + latest_timestamp = os.path.getmtime(gn_config_dir) + latest_config = gn_config + if latest_config: + print(">>> Latest GN build found is %s" % latest_config) + options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config) + + build_config_path = os.path.join( + BASE_DIR, options.outdir, "v8_build_config.json") + if os.path.exists(build_config_path): + try: + with open(build_config_path) as f: + build_config = json.load(f) + except Exception: + print ("%s exists but contains invalid json. Is your build up-to-date?" % + build_config_path) + return False + options.auto_detect = True + + options.arch_and_mode = None + options.arch = build_config["v8_target_cpu"] + if options.arch == 'x86': + # TODO(machenbach): Transform all to x86 eventually. 
+ options.arch = 'ia32' + options.asan = build_config["is_asan"] + options.dcheck_always_on = build_config["dcheck_always_on"] + options.mode = 'debug' if build_config["is_debug"] else 'release' + options.msan = build_config["is_msan"] + options.no_i18n = not build_config["v8_enable_i18n_support"] + options.no_snap = not build_config["v8_use_snapshot"] + options.tsan = build_config["is_tsan"] + # Architecture and mode related stuff. if options.arch_and_mode: options.arch_and_mode = [arch_and_mode.split(".") @@ -451,11 +499,7 @@ def ProcessOptions(options): # Special processing of other options, sorted alphabetically. if options.buildbot: - # Buildbots run presubmit tests as a separate step. - options.no_presubmit = True options.no_network = True - if options.download_data_only: - options.no_presubmit = True if options.command_prefix: print("Specifying --command-prefix disables network distribution, " "running tests locally.") @@ -478,6 +522,8 @@ def ProcessOptions(options): # Other options for manipulating variants still apply afterwards. VARIANTS = EXHAUSTIVE_VARIANTS + # TODO(machenbach): Figure out how to test a bigger subset of variants on + # msan and tsan. 
if options.msan: VARIANTS = ["default"] @@ -494,23 +540,25 @@ def excl(*args): """Returns true if zero or one of multiple arguments are true.""" return reduce(lambda x, y: x + y, args) <= 1 - if not excl(options.no_stress, options.stress_only, options.no_variants, - bool(options.variants)): - print("Use only one of --no-stress, --stress-only, --no-variants, " - "or --variants.") + if not excl(options.no_variants, bool(options.variants)): + print("Use only one of --no-variants or --variants.") return False if options.quickcheck: VARIANTS = ["default", "stress"] options.slow_tests = "skip" options.pass_fail_tests = "skip" - if options.no_stress: - VARIANTS = ["default", "nocrankshaft"] if options.no_variants: VARIANTS = ["default"] - if options.stress_only: - VARIANTS = ["stress"] if options.variants: VARIANTS = options.variants.split(",") + + # Resolve variant aliases. + VARIANTS = reduce( + list.__add__, + (VARIANT_ALIASES.get(v, [v]) for v in VARIANTS), + [], + ) + if not set(VARIANTS).issubset(ALL_VARIANTS): print "All variants must be in %s" % str(ALL_VARIANTS) return False @@ -520,6 +568,9 @@ def excl(*args): options.extra_flags.append("--verify_predictable") options.extra_flags.append("--no-inline-new") + # Dedupe. + VARIANTS = list(set(VARIANTS)) + if not options.shell_dir: if options.shell: print "Warning: --shell is deprecated, use --shell-dir instead." @@ -591,11 +642,12 @@ def Main(): return 1 SetupEnvironment(options) + if options.swarming: + # Swarming doesn't print how isolated commands are called. Lets make this + # less cryptic by printing it ourselves. 
+ print ' '.join(sys.argv) + exit_code = 0 - if not options.no_presubmit: - print ">>> running presubmit tests" - exit_code = subprocess.call( - [sys.executable, join(BASE_DIR, "tools", "presubmit.py")]) suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test")) @@ -633,6 +685,9 @@ def ExpandTestGroups(name): if options.download_data_only: return exit_code + for s in suites: + s.PrepareSources() + for (arch, mode) in options.arch_and_mode: try: code = Execute(arch, mode, args, options, suites) @@ -652,6 +707,10 @@ def Execute(arch, mode, args, options, suites): # buildbot. Currently this is capitalized Release and Debug. shell_dir = os.path.join(BASE_DIR, options.outdir, mode) mode = BuildbotToV8Mode(mode) + elif options.auto_detect: + # If an output dir with a build was passed, test directly in that + # directory. + shell_dir = os.path.join(BASE_DIR, options.outdir) else: shell_dir = os.path.join( BASE_DIR, @@ -663,19 +722,16 @@ def Execute(arch, mode, args, options, suites): # Populate context object. mode_flags = MODES[mode]["flags"] - timeout = options.timeout - if timeout == -1: - # Simulators are slow, therefore allow a longer default timeout. - if arch in SLOW_ARCHS: - timeout = 2 * TIMEOUT_DEFAULT; - else: - timeout = TIMEOUT_DEFAULT; - timeout *= MODES[mode]["timeout_scalefactor"] + # Simulators are slow, therefore allow a longer timeout. + if arch in SLOW_ARCHS: + options.timeout *= 2 + + options.timeout *= MODES[mode]["timeout_scalefactor"] if options.predictable: # Predictable mode is slower. - timeout *= 2 + options.timeout *= 2 # TODO(machenbach): Remove temporary verbose output on windows after # debugging driver-hung-up on XP. 
@@ -685,7 +741,8 @@ def Execute(arch, mode, args, options, suites): ) ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir, mode_flags, verbose_output, - timeout, options.isolates, + options.timeout, + options.isolates, options.command_prefix, options.extra_flags, options.no_i18n, @@ -699,6 +756,8 @@ def Execute(arch, mode, args, options, suites): sancov_dir=options.sancov_dir) # TODO(all): Combine "simulator" and "simulator_run". + # TODO(machenbach): In GN we can derive simulator run from + # target_arch != v8_target_arch in the dumped build config. simulator_run = not options.dont_skip_simulator_slow_tests and \ arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \ 'ppc', 'ppc64'] and \ @@ -710,7 +769,6 @@ def Execute(arch, mode, args, options, suites): "deopt_fuzzer": False, "gc_stress": options.gc_stress, "gcov_coverage": options.gcov_coverage, - "ignition": options.ignition, "isolates": options.isolates, "mode": MODES[mode]["status_mode"], "no_i18n": options.no_i18n, @@ -733,8 +791,12 @@ def Execute(arch, mode, args, options, suites): if len(args) > 0: s.FilterTestCasesByArgs(args) all_tests += s.tests + + # First filtering by status applying the generic rules (independent of + # variants). s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests, options.pass_fail_tests) + if options.cat: verbose.PrintTestSource(s.tests) continue @@ -762,6 +824,10 @@ def iter_seed_flags(): else: s.tests = variant_tests + # Second filtering by status applying the variant-dependent rules. 
+ s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests, + options.pass_fail_tests, variants=True) + s.tests = ShardTests(s.tests, options) num_tests += len(s.tests) diff --git a/deps/v8/tools/run-valgrind.gyp b/deps/v8/tools/run-valgrind.gyp index d06be933a9d3ab..02dd26d22ca186 100644 --- a/deps/v8/tools/run-valgrind.gyp +++ b/deps/v8/tools/run-valgrind.gyp @@ -13,8 +13,8 @@ '../src/d8.gyp:d8_run', ], 'includes': [ - '../build/features.gypi', - '../build/isolate.gypi', + '../gypfiles/features.gypi', + '../gypfiles/isolate.gypi', ], 'sources': [ 'run-valgrind.isolate', diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py index db4245f499fc07..2b406bd7826ef6 100755 --- a/deps/v8/tools/run_perf.py +++ b/deps/v8/tools/run_perf.py @@ -113,8 +113,6 @@ "ia32", "mips", "mipsel", - "nacl_ia32", - "nacl_x64", "x64", "arm64"] @@ -128,17 +126,20 @@ def LoadAndroidBuildTools(path): # pragma: no cover assert os.path.exists(path) sys.path.insert(0, path) - from pylib.device import adb_wrapper # pylint: disable=F0401 - from pylib.device import device_errors # pylint: disable=F0401 - from pylib.device import device_utils # pylint: disable=F0401 - from pylib.perf import cache_control # pylint: disable=F0401 - from pylib.perf import perf_control # pylint: disable=F0401 + import devil_chromium + from devil.android import device_errors # pylint: disable=import-error + from devil.android import device_utils # pylint: disable=import-error + from devil.android.sdk import adb_wrapper # pylint: disable=import-error + from devil.android.perf import cache_control # pylint: disable=import-error + from devil.android.perf import perf_control # pylint: disable=import-error global adb_wrapper global cache_control global device_errors global device_utils global perf_control + devil_chromium.Initialize() + def GeometricMean(values): """Returns the geometric mean of a list of values. 
@@ -612,6 +613,21 @@ def Run(self, runnable, count): class DesktopPlatform(Platform): def __init__(self, options): super(DesktopPlatform, self).__init__(options) + self.command_prefix = [] + + if options.prioritize or options.affinitize != None: + self.command_prefix = ["schedtool"] + if options.prioritize: + self.command_prefix += ["-n", "-20"] + if options.affinitize != None: + # schedtool expects a bit pattern when setting affinity, where each + # bit set to '1' corresponds to a core where the process may run on. + # First bit corresponds to CPU 0. Since the 'affinitize' parameter is + # a core number, we need to map to said bit pattern. + cpu = int(options.affinitize) + core = 1 << cpu + self.command_prefix += ["-a", ("0x%x" % core)] + self.command_prefix += ["-e"] def PreExecution(self): pass @@ -627,15 +643,18 @@ def _Run(self, runnable, count, no_patch=False): suffix = ' - without patch' if no_patch else '' shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir title = ">>> %%s (#%d)%s:" % ((count + 1), suffix) + command = self.command_prefix + runnable.GetCommand(shell_dir, + self.extra_flags) try: output = commands.Execute( - runnable.GetCommand(shell_dir, self.extra_flags), - timeout=runnable.timeout, + command, + timeout=runnable.timeout, ) except OSError as e: # pragma: no cover print title % "OSError" print e return "" + print title % "Stdout" print output.stdout if output.stderr: # pragma: no cover @@ -788,6 +807,109 @@ def _Run(self, runnable, count, no_patch=False): stdout = "" return stdout +class CustomMachineConfiguration: + def __init__(self, disable_aslr = False, governor = None): + self.aslr_backup = None + self.governor_backup = None + self.disable_aslr = disable_aslr + self.governor = governor + + def __enter__(self): + if self.disable_aslr: + self.aslr_backup = CustomMachineConfiguration.GetASLR() + CustomMachineConfiguration.SetASLR(0) + if self.governor != None: + self.governor_backup = 
CustomMachineConfiguration.GetCPUGovernor() + CustomMachineConfiguration.SetCPUGovernor(self.governor) + return self + + def __exit__(self, type, value, traceback): + if self.aslr_backup != None: + CustomMachineConfiguration.SetASLR(self.aslr_backup) + if self.governor_backup != None: + CustomMachineConfiguration.SetCPUGovernor(self.governor_backup) + + @staticmethod + def GetASLR(): + try: + with open("/proc/sys/kernel/randomize_va_space", "r") as f: + return int(f.readline().strip()) + except Exception as e: + print "Failed to get current ASLR settings." + raise e + + @staticmethod + def SetASLR(value): + try: + with open("/proc/sys/kernel/randomize_va_space", "w") as f: + f.write(str(value)) + except Exception as e: + print "Failed to update ASLR to %s." % value + print "Are we running under sudo?" + raise e + + new_value = CustomMachineConfiguration.GetASLR() + if value != new_value: + raise Exception("Present value is %s" % new_value) + + @staticmethod + def GetCPUCoresRange(): + try: + with open("/sys/devices/system/cpu/present", "r") as f: + indexes = f.readline() + r = map(int, indexes.split("-")) + if len(r) == 1: + return range(r[0], r[0] + 1) + return range(r[0], r[1] + 1) + except Exception as e: + print "Failed to retrieve number of CPUs." 
+ raise e + + @staticmethod + def GetCPUPathForId(cpu_index): + ret = "/sys/devices/system/cpu/cpu" + ret += str(cpu_index) + ret += "/cpufreq/scaling_governor" + return ret + + @staticmethod + def GetCPUGovernor(): + try: + cpu_indices = CustomMachineConfiguration.GetCPUCoresRange() + ret = None + for cpu_index in cpu_indices: + cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index) + with open(cpu_device, "r") as f: + # We assume the governors of all CPUs are set to the same value + val = f.readline().strip() + if ret == None: + ret = val + elif ret != val: + raise Exception("CPU cores have differing governor settings") + return ret + except Exception as e: + print "Failed to get the current CPU governor." + print "Is the CPU governor disabled? Check BIOS." + raise e + + @staticmethod + def SetCPUGovernor(value): + try: + cpu_indices = CustomMachineConfiguration.GetCPUCoresRange() + for cpu_index in cpu_indices: + cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index) + with open(cpu_device, "w") as f: + f.write(value) + + except Exception as e: + print "Failed to change CPU governor to %s." % value + print "Are we running under sudo?" + raise e + + cur_value = CustomMachineConfiguration.GetCPUGovernor() + if cur_value != value: + raise Exception("Could not set CPU governor. Present value is %s" + % cur_value ) # TODO: Implement results_processor. def Main(args): @@ -822,6 +944,27 @@ def Main(args): help="JavaScript engine binary. By default, d8 under " "architecture-specific build dir. " "Not supported in conjunction with outdir-no-patch.") + parser.add_option("--prioritize", + help="Raise the priority to nice -20 for the benchmarking " + "process.Requires Linux, schedtool, and sudo privileges.", + default=False, action="store_true") + parser.add_option("--affinitize", + help="Run benchmarking process on the specified core. " + "For example: " + "--affinitize=0 will run the benchmark process on core 0. 
" + "--affinitize=3 will run the benchmark process on core 3. " + "Requires Linux, schedtool, and sudo privileges.", + default=None) + parser.add_option("--noaslr", + help="Disable ASLR for the duration of the benchmarked " + "process. Requires Linux and sudo privileges.", + default=False, action="store_true") + parser.add_option("--cpu-governor", + help="Set cpu governor to specified policy for the " + "duration of the benchmarked process. Typical options: " + "'powersave' for more stable results, or 'performance' " + "for shorter completion time of suite, with potentially " + "more noise in results.") (options, args) = parser.parse_args(args) @@ -872,56 +1015,60 @@ def Main(args): else: options.shell_dir_no_patch = None + prev_aslr = None + prev_cpu_gov = None platform = Platform.GetPlatform(options) results = Results() results_no_patch = Results() - for path in args: - path = os.path.abspath(path) + with CustomMachineConfiguration(governor = options.cpu_governor, + disable_aslr = options.noaslr) as conf: + for path in args: + path = os.path.abspath(path) - if not os.path.exists(path): # pragma: no cover - results.errors.append("Configuration file %s does not exist." % path) - continue + if not os.path.exists(path): # pragma: no cover + results.errors.append("Configuration file %s does not exist." % path) + continue - with open(path) as f: - suite = json.loads(f.read()) + with open(path) as f: + suite = json.loads(f.read()) - # If no name is given, default to the file name without .json. - suite.setdefault("name", os.path.splitext(os.path.basename(path))[0]) + # If no name is given, default to the file name without .json. + suite.setdefault("name", os.path.splitext(os.path.basename(path))[0]) - # Setup things common to one test suite. - platform.PreExecution() + # Setup things common to one test suite. + platform.PreExecution() - # Build the graph/trace tree structure. 
- default_parent = DefaultSentinel(default_binary_name) - root = BuildGraphConfigs(suite, options.arch, default_parent) + # Build the graph/trace tree structure. + default_parent = DefaultSentinel(default_binary_name) + root = BuildGraphConfigs(suite, options.arch, default_parent) - # Callback to be called on each node on traversal. - def NodeCB(node): - platform.PreTests(node, path) + # Callback to be called on each node on traversal. + def NodeCB(node): + platform.PreTests(node, path) - # Traverse graph/trace tree and interate over all runnables. - for runnable in FlattenRunnables(root, NodeCB): - print ">>> Running suite: %s" % "/".join(runnable.graphs) + # Traverse graph/trace tree and interate over all runnables. + for runnable in FlattenRunnables(root, NodeCB): + print ">>> Running suite: %s" % "/".join(runnable.graphs) - def Runner(): - """Output generator that reruns several times.""" - for i in xrange(0, max(1, runnable.run_count)): - # TODO(machenbach): Allow timeout per arch like with run_count per - # arch. - yield platform.Run(runnable, i) + def Runner(): + """Output generator that reruns several times.""" + for i in xrange(0, max(1, runnable.run_count)): + # TODO(machenbach): Allow timeout per arch like with run_count per + # arch. + yield platform.Run(runnable, i) - # Let runnable iterate over all runs and handle output. - result, result_no_patch = runnable.Run( + # Let runnable iterate over all runs and handle output. 
+ result, result_no_patch = runnable.Run( Runner, trybot=options.shell_dir_no_patch) - results += result - results_no_patch += result_no_patch - platform.PostExecution() - - if options.json_test_results: - results.WriteToFile(options.json_test_results) - else: # pragma: no cover - print results + results += result + results_no_patch += result_no_patch + platform.PostExecution() + + if options.json_test_results: + results.WriteToFile(options.json_test_results) + else: # pragma: no cover + print results if options.json_test_results_no_patch: results_no_patch.WriteToFile(options.json_test_results_no_patch) diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py index e725d112f95593..a9315cb78ce53c 100644 --- a/deps/v8/tools/testrunner/local/commands.py +++ b/deps/v8/tools/testrunner/local/commands.py @@ -111,8 +111,8 @@ def kill_process(process, timeout_result): return output.Output( process.returncode, timeout_result[0], - stdout, - stderr, + stdout.decode('utf-8', 'replace').encode('utf-8'), + stderr.decode('utf-8', 'replace').encode('utf-8'), process.pid, ) diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py index e0aec0bb9018a8..f3d11a8b5cf377 100644 --- a/deps/v8/tools/testrunner/local/execution.py +++ b/deps/v8/tools/testrunner/local/execution.py @@ -248,7 +248,6 @@ def _MaybeRerun(self, pool, test): self.total += 1 def _ProcessTestNormal(self, test, result, pool): - self.indicator.AboutToRun(test) test.output = result[1] test.duration = result[2] has_unexpected_output = test.suite.HasUnexpectedOutput(test) @@ -285,7 +284,6 @@ def AllocationStr(stdout): if test.run == 1 and result[1].HasTimedOut(): # If we get a timeout in the first run, we are already in an # unpredictable state. Just report it as a failure and don't rerun. 
- self.indicator.AboutToRun(test) test.output = result[1] self.remaining -= 1 self.failed.append(test) @@ -294,16 +292,13 @@ def AllocationStr(stdout): # From the second run on, check for different allocations. If a # difference is found, call the indicator twice to report both tests. # All runs of each test are counted as one for the statistic. - self.indicator.AboutToRun(test) self.remaining -= 1 self.failed.append(test) self.indicator.HasRun(test, True) - self.indicator.AboutToRun(test) test.output = result[1] self.indicator.HasRun(test, True) elif test.run >= 3: # No difference on the third run -> report a success. - self.indicator.AboutToRun(test) self.remaining -= 1 self.succeeded += 1 test.output = result[1] diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py index 4e1be3e4cf60d4..33e27e154b31b2 100644 --- a/deps/v8/tools/testrunner/local/progress.py +++ b/deps/v8/tools/testrunner/local/progress.py @@ -34,6 +34,7 @@ from . import execution from . import junit_output +from . import statusfile ABS_PATH_PREFIX = os.getcwd() + os.sep @@ -53,9 +54,6 @@ def Starting(self): def Done(self): pass - def AboutToRun(self, test): - pass - def HasRun(self, test, has_unexpected_output): pass @@ -146,10 +144,6 @@ def Done(self): class VerboseProgressIndicator(SimpleProgressIndicator): - def AboutToRun(self, test): - print 'Starting %s...' % test.GetLabel() - sys.stdout.flush() - def HasRun(self, test, has_unexpected_output): if has_unexpected_output: if test.output.HasCrashed(): @@ -200,10 +194,8 @@ def Done(self): self.PrintProgress('Done') print "" # Line break. - def AboutToRun(self, test): - self.PrintProgress(test.GetLabel()) - def HasRun(self, test, has_unexpected_output): + self.PrintProgress(test.GetLabel()) if has_unexpected_output: self.ClearLine(self.last_status_length) self.PrintFailureHeader(test) @@ -329,6 +321,12 @@ def Done(self): # Buildbot might start out with an empty file. 
complete_results = json.loads(f.read() or "[]") + duration_mean = None + if self.tests: + # Get duration mean. + duration_mean = ( + sum(t.duration for t in self.tests) / float(len(self.tests))) + # Sort tests by duration. timed_tests = [t for t in self.tests if t.duration is not None] timed_tests.sort(lambda a, b: cmp(b.duration, a.duration)) @@ -338,6 +336,7 @@ def Done(self): "flags": test.flags, "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""), "duration": test.duration, + "marked_slow": statusfile.IsSlow(test.outcomes), } for test in timed_tests[:20] ] @@ -346,6 +345,8 @@ def Done(self): "mode": self.mode, "results": self.results, "slowest_tests": slowest_tests, + "duration_mean": duration_mean, + "test_total": len(self.tests), }) with open(self.json_test_results, "w") as f: diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py index 7e96cc37150058..091754043bc048 100644 --- a/deps/v8/tools/testrunner/local/statusfile.py +++ b/deps/v8/tools/testrunner/local/statusfile.py @@ -26,6 +26,10 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os +import re + +from variants import ALL_VARIANTS +from utils import Freeze # These outcomes can occur in a TestCase's outcomes list: SKIP = "SKIP" @@ -57,8 +61,12 @@ for var in ["debug", "release", "big", "little", "android_arm", "android_arm64", "android_ia32", "android_x87", "android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64", - "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64", - "s390", "s390x", "macos", "windows", "linux", "aix"]: + "mips64el", "x64", "x87", "ppc", "ppc64", "s390", "s390x", "macos", + "windows", "linux", "aix"]: + VARIABLES[var] = var + +# Allow using variants as keywords. 
+for var in ALL_VARIANTS: VARIABLES[var] = var @@ -100,6 +108,44 @@ def _AddOutcome(result, new): result.add(new) +def _JoinsPassAndFail(outcomes1, outcomes2): + """Indicates if we join PASS and FAIL from two different outcome sets and + the first doesn't already contain both. + """ + return ( + PASS in outcomes1 and + not FAIL in outcomes1 and + FAIL in outcomes2 + ) + +VARIANT_EXPRESSION = object() + +def _EvalExpression(exp, variables): + try: + return eval(exp, variables) + except NameError as e: + identifier = re.match("name '(.*)' is not defined", e.message).group(1) + assert identifier == "variant", "Unknown identifier: %s" % identifier + return VARIANT_EXPRESSION + + +def _EvalVariantExpression(section, rules, wildcards, variant, variables): + variables_with_variant = {} + variables_with_variant.update(variables) + variables_with_variant["variant"] = variant + result = _EvalExpression(section[0], variables_with_variant) + assert result != VARIANT_EXPRESSION + if result is True: + _ReadSection( + section[1], + rules[variant], + wildcards[variant], + variables_with_variant, + ) + else: + assert result is False, "Make sure expressions evaluate to boolean values" + + def _ParseOutcomeList(rule, outcomes, target_dict, variables): result = set([]) if type(outcomes) == str: @@ -108,7 +154,16 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables): if type(item) == str: _AddOutcome(result, item) elif type(item) == list: - if not eval(item[0], variables): continue + exp = _EvalExpression(item[0], variables) + assert exp != VARIANT_EXPRESSION, ( + "Nested variant expressions are not supported") + if exp is False: + continue + + # Ensure nobody uses an identifier by mistake, like "default", + # which would evaluate to true here otherwise. 
+ assert exp is True, "Make sure expressions evaluate to boolean values" + for outcome in item[1:]: assert type(outcome) == str _AddOutcome(result, outcome) @@ -116,40 +171,71 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables): assert False if len(result) == 0: return if rule in target_dict: + # A FAIL without PASS in one rule has always precedence over a single + # PASS (without FAIL) in another. Otherwise the default PASS expectation + # in a rule with a modifier (e.g. PASS, SLOW) would be joined to a FAIL + # from another rule (which intended to mark a test as FAIL and not as + # PASS and FAIL). + if _JoinsPassAndFail(target_dict[rule], result): + target_dict[rule] -= set([PASS]) + if _JoinsPassAndFail(result, target_dict[rule]): + result -= set([PASS]) target_dict[rule] |= result else: target_dict[rule] = result -def ReadContent(path): - with open(path) as f: - global KEYWORDS - return eval(f.read(), KEYWORDS) +def ReadContent(content): + global KEYWORDS + return eval(content, KEYWORDS) -def ReadStatusFile(path, variables): - contents = ReadContent(path) +def ReadStatusFile(content, variables): + # Empty defaults for rules and wildcards. Variant-independent + # rules are mapped by "", others by the variant name. 
+ rules = {variant: {} for variant in ALL_VARIANTS} + rules[""] = {} + wildcards = {variant: {} for variant in ALL_VARIANTS} + wildcards[""] = {} - rules = {} - wildcards = {} variables.update(VARIABLES) - for section in contents: + for section in ReadContent(content): assert type(section) == list assert len(section) == 2 - if not eval(section[0], variables): continue - section = section[1] - assert type(section) == dict - for rule in section: - assert type(rule) == str - if rule[-1] == '*': - _ParseOutcomeList(rule, section[rule], wildcards, variables) - else: - _ParseOutcomeList(rule, section[rule], rules, variables) - return rules, wildcards + exp = _EvalExpression(section[0], variables) + if exp is False: + # The expression is variant-independent and evaluates to False. + continue + elif exp == VARIANT_EXPRESSION: + # If the expression contains one or more "variant" keywords, we evaluate + # it for all possible variants and create rules for those that apply. + for variant in ALL_VARIANTS: + _EvalVariantExpression(section, rules, wildcards, variant, variables) + else: + # The expression is variant-independent and evaluates to True. + assert exp is True, "Make sure expressions evaluate to boolean values" + _ReadSection( + section[1], + rules[""], + wildcards[""], + variables, + ) + return Freeze(rules), Freeze(wildcards) + + +def _ReadSection(section, rules, wildcards, variables): + assert type(section) == dict + for rule in section: + assert type(rule) == str + if rule[-1] == '*': + _ParseOutcomeList(rule, section[rule], wildcards, variables) + else: + _ParseOutcomeList(rule, section[rule], rules, variables) def PresubmitCheck(path): - contents = ReadContent(path) + with open(path) as f: + contents = ReadContent(f.read()) root_prefix = os.path.basename(os.path.dirname(path)) + "/" status = {"success": True} def _assert(check, message): # Like "assert", but doesn't throw. 
diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_unittest.py new file mode 100755 index 00000000000000..f64ab3425ef19a --- /dev/null +++ b/deps/v8/tools/testrunner/local/statusfile_unittest.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import unittest + +import statusfile +from utils import Freeze + + +TEST_VARIABLES = { + 'system': 'linux', + 'mode': 'release', +} + + +TEST_STATUS_FILE = """ +[ +[ALWAYS, { + 'foo/bar': [PASS, SKIP], + 'baz/bar': [PASS, FAIL], + 'foo/*': [PASS, SLOW], +}], # ALWAYS + +['%s', { + 'baz/bar': [PASS, SLOW], + 'foo/*': [FAIL], +}], +] +""" + + +def make_variables(): + variables = {} + variables.update(TEST_VARIABLES) + return variables + + +class UtilsTest(unittest.TestCase): + def test_freeze(self): + self.assertEqual(2, Freeze({1: [2]})[1][0]) + self.assertEqual(set([3]), Freeze({1: [2], 2: set([3])})[2]) + + with self.assertRaises(Exception): + Freeze({1: [], 2: set([3])})[2] = 4 + with self.assertRaises(Exception): + Freeze({1: [], 2: set([3])}).update({3: 4}) + with self.assertRaises(Exception): + Freeze({1: [], 2: set([3])})[1].append(2) + with self.assertRaises(Exception): + Freeze({1: [], 2: set([3])})[2] |= set([3]) + + # Sanity check that we can do the same calls on a non-frozen object. 
+ {1: [], 2: set([3])}[2] = 4 + {1: [], 2: set([3])}.update({3: 4}) + {1: [], 2: set([3])}[1].append(2) + {1: [], 2: set([3])}[2] |= set([3]) + + +class StatusFileTest(unittest.TestCase): + def test_eval_expression(self): + variables = make_variables() + variables.update(statusfile.VARIABLES) + + self.assertTrue( + statusfile._EvalExpression( + 'system==linux and mode==release', variables)) + self.assertTrue( + statusfile._EvalExpression( + 'system==linux or variant==default', variables)) + self.assertFalse( + statusfile._EvalExpression( + 'system==linux and mode==debug', variables)) + self.assertRaises( + AssertionError, + lambda: statusfile._EvalExpression( + 'system==linux and mode==foo', variables)) + self.assertRaises( + SyntaxError, + lambda: statusfile._EvalExpression( + 'system==linux and mode=release', variables)) + self.assertEquals( + statusfile.VARIANT_EXPRESSION, + statusfile._EvalExpression( + 'system==linux and variant==default', variables) + ) + + def test_read_statusfile_section_true(self): + rules, wildcards = statusfile.ReadStatusFile( + TEST_STATUS_FILE % 'system==linux', make_variables()) + + self.assertEquals( + { + 'foo/bar': set(['PASS', 'SKIP']), + 'baz/bar': set(['PASS', 'FAIL', 'SLOW']), + }, + rules[''], + ) + self.assertEquals( + { + 'foo/*': set(['SLOW', 'FAIL']), + }, + wildcards[''], + ) + self.assertEquals({}, rules['default']) + self.assertEquals({}, wildcards['default']) + + def test_read_statusfile_section_false(self): + rules, wildcards = statusfile.ReadStatusFile( + TEST_STATUS_FILE % 'system==windows', make_variables()) + + self.assertEquals( + { + 'foo/bar': set(['PASS', 'SKIP']), + 'baz/bar': set(['PASS', 'FAIL']), + }, + rules[''], + ) + self.assertEquals( + { + 'foo/*': set(['PASS', 'SLOW']), + }, + wildcards[''], + ) + self.assertEquals({}, rules['default']) + self.assertEquals({}, wildcards['default']) + + def test_read_statusfile_section_variant(self): + rules, wildcards = statusfile.ReadStatusFile( + TEST_STATUS_FILE % 
'system==linux and variant==default', + make_variables(), + ) + + self.assertEquals( + { + 'foo/bar': set(['PASS', 'SKIP']), + 'baz/bar': set(['PASS', 'FAIL']), + }, + rules[''], + ) + self.assertEquals( + { + 'foo/*': set(['PASS', 'SLOW']), + }, + wildcards[''], + ) + self.assertEquals( + { + 'baz/bar': set(['PASS', 'SLOW']), + }, + rules['default'], + ) + self.assertEquals( + { + 'foo/*': set(['FAIL']), + }, + wildcards['default'], + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py index f43d008b220bc5..11d220742779f0 100644 --- a/deps/v8/tools/testrunner/local/testsuite.py +++ b/deps/v8/tools/testrunner/local/testsuite.py @@ -33,30 +33,9 @@ from . import statusfile from . import utils from ..objects import testcase +from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS + -# Use this to run several variants of the tests. -ALL_VARIANT_FLAGS = { - "default": [[]], - "stress": [["--stress-opt", "--always-opt"]], - "turbofan": [["--turbo"]], - "turbofan_opt": [["--turbo", "--always-opt"]], - "nocrankshaft": [["--nocrankshaft"]], - "ignition": [["--ignition", "--turbo"]], - "preparser": [["--min-preparse-length=0"]], -} - -# FAST_VARIANTS implies no --always-opt. 
-FAST_VARIANT_FLAGS = { - "default": [[]], - "stress": [["--stress-opt"]], - "turbofan": [["--turbo"]], - "nocrankshaft": [["--nocrankshaft"]], - "ignition": [["--ignition", "--turbo"]], - "preparser": [["--min-preparse-length=0"]], -} - -ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt", - "nocrankshaft", "ignition", "preparser"]) FAST_VARIANTS = set(["default", "turbofan"]) STANDARD_VARIANT = set(["default"]) @@ -69,12 +48,13 @@ def __init__(self, suite, variants): self.standard_variant = STANDARD_VARIANT & variants def FilterVariantsByTest(self, testcase): - if testcase.outcomes and statusfile.OnlyStandardVariant( - testcase.outcomes): - return self.standard_variant - if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes): - return self.fast_variants - return self.all_variants + result = self.all_variants + if testcase.outcomes: + if statusfile.OnlyStandardVariant(testcase.outcomes): + return self.standard_variant + if statusfile.OnlyFastVariants(testcase.outcomes): + result = self.fast_variants + return result def GetFlagSets(self, testcase, variant): if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes): @@ -142,12 +122,21 @@ def CreateVariantGenerator(self, variants): """ return self._VariantGeneratorFactory()(self, set(variants)) + def PrepareSources(self): + """Called once before multiprocessing for doing file-system operations. + + This should not access the network. For network access use the method + below. 
+ """ + pass + def DownloadData(self): pass def ReadStatusFile(self, variables): - (self.rules, self.wildcards) = \ - statusfile.ReadStatusFile(self.status_file(), variables) + with open(self.status_file()) as f: + self.rules, self.wildcards = ( + statusfile.ReadStatusFile(f.read(), variables)) def ReadTestCases(self, context): self.tests = self.ListTests(context) @@ -162,18 +151,40 @@ def _FilterPassFail(pass_fail, mode): def FilterTestCasesByStatus(self, warn_unused_rules, slow_tests="dontcare", - pass_fail_tests="dontcare"): + pass_fail_tests="dontcare", + variants=False): + + # Use only variants-dependent rules and wildcards when filtering + # respective test cases and generic rules when filtering generic test + # cases. + if not variants: + rules = self.rules[""] + wildcards = self.wildcards[""] + else: + # We set rules and wildcards to a variant-specific version for each test + # below. + rules = {} + wildcards = {} + filtered = [] + + # Remember used rules as tuples of (rule, variant), where variant is "" for + # variant-independent rules. used_rules = set() + for t in self.tests: slow = False pass_fail = False testname = self.CommonTestName(t) - if testname in self.rules: - used_rules.add(testname) + variant = t.variant or "" + if variants: + rules = self.rules[variant] + wildcards = self.wildcards[variant] + if testname in rules: + used_rules.add((testname, variant)) # Even for skipped tests, as the TestCase object stays around and # PrintReport() uses it. - t.outcomes = self.rules[testname] + t.outcomes = t.outcomes | rules[testname] if statusfile.DoSkip(t.outcomes): continue # Don't add skipped tests to |filtered|. 
for outcome in t.outcomes: @@ -182,14 +193,14 @@ def FilterTestCasesByStatus(self, warn_unused_rules, slow = statusfile.IsSlow(t.outcomes) pass_fail = statusfile.IsPassOrFail(t.outcomes) skip = False - for rule in self.wildcards: + for rule in wildcards: assert rule[-1] == '*' if testname.startswith(rule[:-1]): - used_rules.add(rule) - t.outcomes |= self.wildcards[rule] + used_rules.add((rule, variant)) + t.outcomes = t.outcomes | wildcards[rule] if statusfile.DoSkip(t.outcomes): skip = True - break # "for rule in self.wildcards" + break # "for rule in wildcards" slow = slow or statusfile.IsSlow(t.outcomes) pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes) if (skip @@ -202,12 +213,26 @@ def FilterTestCasesByStatus(self, warn_unused_rules, if not warn_unused_rules: return - for rule in self.rules: - if rule not in used_rules: - print("Unused rule: %s -> %s" % (rule, self.rules[rule])) - for rule in self.wildcards: - if rule not in used_rules: - print("Unused rule: %s -> %s" % (rule, self.wildcards[rule])) + if not variants: + for rule in self.rules[""]: + if (rule, "") not in used_rules: + print("Unused rule: %s -> %s (variant independent)" % ( + rule, self.rules[""][rule])) + for rule in self.wildcards[""]: + if (rule, "") not in used_rules: + print("Unused rule: %s -> %s (variant independent)" % ( + rule, self.wildcards[""][rule])) + else: + for variant in ALL_VARIANTS: + for rule in self.rules[variant]: + if (rule, variant) not in used_rules: + print("Unused rule: %s -> %s (variant: %s)" % ( + rule, self.rules[variant][rule], variant)) + for rule in self.wildcards[variant]: + if (rule, variant) not in used_rules: + print("Unused rule: %s -> %s (variant: %s)" % ( + rule, self.wildcards[variant][rule], variant)) + def FilterTestCasesByArgs(self, args): """Filter test cases based on command-line arguments. 
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py new file mode 100755 index 00000000000000..1e10ef5564e375 --- /dev/null +++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import os +import sys +import unittest + +# Needed because the test runner contains relative imports. +TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) +sys.path.append(TOOLS_PATH) + +from testrunner.local.testsuite import TestSuite +from testrunner.objects.testcase import TestCase + + +class TestSuiteTest(unittest.TestCase): + def test_filter_testcases_by_status_first_pass(self): + suite = TestSuite('foo', 'bar') + suite.tests = [ + TestCase(suite, 'foo/bar'), + TestCase(suite, 'baz/bar'), + ] + suite.rules = { + '': { + 'foo/bar': set(['PASS', 'SKIP']), + 'baz/bar': set(['PASS', 'FAIL']), + }, + } + suite.wildcards = { + '': { + 'baz/*': set(['PASS', 'SLOW']), + }, + } + suite.FilterTestCasesByStatus(warn_unused_rules=False) + self.assertEquals( + [TestCase(suite, 'baz/bar')], + suite.tests, + ) + self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), suite.tests[0].outcomes) + + def test_filter_testcases_by_status_second_pass(self): + suite = TestSuite('foo', 'bar') + + test1 = TestCase(suite, 'foo/bar') + test2 = TestCase(suite, 'baz/bar') + + # Contrived outcomes from filtering by variant-independent rules. 
+ test1.outcomes = set(['PREV']) + test2.outcomes = set(['PREV']) + + suite.tests = [ + test1.CopyAddingFlags(variant='default', flags=[]), + test1.CopyAddingFlags(variant='stress', flags=['-v']), + test2.CopyAddingFlags(variant='default', flags=[]), + test2.CopyAddingFlags(variant='stress', flags=['-v']), + ] + + suite.rules = { + 'default': { + 'foo/bar': set(['PASS', 'SKIP']), + 'baz/bar': set(['PASS', 'FAIL']), + }, + 'stress': { + 'baz/bar': set(['SKIP']), + }, + } + suite.wildcards = { + 'default': { + 'baz/*': set(['PASS', 'SLOW']), + }, + 'stress': { + 'foo/*': set(['PASS', 'SLOW']), + }, + } + suite.FilterTestCasesByStatus(warn_unused_rules=False, variants=True) + self.assertEquals( + [ + TestCase(suite, 'foo/bar', flags=['-v']), + TestCase(suite, 'baz/bar'), + ], + suite.tests, + ) + + self.assertEquals( + set(['PASS', 'SLOW', 'PREV']), + suite.tests[0].outcomes, + ) + self.assertEquals( + set(['PASS', 'FAIL', 'SLOW', 'PREV']), + suite.tests[1].outcomes, + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py index c880dfc34ebd83..3e79e44afa22bf 100644 --- a/deps/v8/tools/testrunner/local/utils.py +++ b/deps/v8/tools/testrunner/local/utils.py @@ -136,3 +136,24 @@ def URLRetrieve(source, destination): pass with open(destination, 'w') as f: f.write(urllib2.urlopen(source).read()) + + +class FrozenDict(dict): + def __setitem__(self, *args, **kwargs): + raise Exception('Tried to mutate a frozen dict') + + def update(self, *args, **kwargs): + raise Exception('Tried to mutate a frozen dict') + + +def Freeze(obj): + if isinstance(obj, dict): + return FrozenDict((k, Freeze(v)) for k, v in obj.iteritems()) + elif isinstance(obj, set): + return frozenset(obj) + elif isinstance(obj, list): + return tuple(Freeze(item) for item in obj) + else: + # Make sure object is hashable. 
+ hash(obj) + return obj diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py new file mode 100644 index 00000000000000..ea42bf52480522 --- /dev/null +++ b/deps/v8/tools/testrunner/local/variants.py @@ -0,0 +1,34 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Use this to run several variants of the tests. +ALL_VARIANT_FLAGS = { + "default": [[]], + "stress": [["--stress-opt", "--always-opt"]], + "turbofan": [["--turbo"]], + "turbofan_opt": [["--turbo", "--always-opt"]], + "nocrankshaft": [["--nocrankshaft"]], + "ignition": [["--ignition"]], + "ignition_staging": [["--ignition-staging"]], + "ignition_turbofan": [["--ignition-staging", "--turbo"]], + "preparser": [["--min-preparse-length=0"]], + "asm_wasm": [["--validate-asm"]], +} + +# FAST_VARIANTS implies no --always-opt. +FAST_VARIANT_FLAGS = { + "default": [[]], + "stress": [["--stress-opt"]], + "turbofan": [["--turbo"]], + "nocrankshaft": [["--nocrankshaft"]], + "ignition": [["--ignition"]], + "ignition_staging": [["--ignition-staging"]], + "ignition_turbofan": [["--ignition-staging", "--turbo"]], + "preparser": [["--min-preparse-length=0"]], + "asm_wasm": [["--validate-asm"]], +} + +ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt", + "nocrankshaft", "ignition", "ignition_staging", + "ignition_turbofan", "preparser", "asm_wasm"]) diff --git a/deps/v8/tools/testrunner/network/network_execution.py b/deps/v8/tools/testrunner/network/network_execution.py index c842aba579c4dd..a95440178b4aa6 100644 --- a/deps/v8/tools/testrunner/network/network_execution.py +++ b/deps/v8/tools/testrunner/network/network_execution.py @@ -203,7 +203,6 @@ def _TalkToPeer(self, peer): [constants.INFORM_DURATION, perf_key, test.duration, self.context.arch, self.context.mode], self.local_socket) - self.indicator.AboutToRun(test) 
has_unexpected_output = test.suite.HasUnexpectedOutput(test) if has_unexpected_output: self.failed.append(test) diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py index 113c624a35d66d..00722d768b3b44 100644 --- a/deps/v8/tools/testrunner/objects/testcase.py +++ b/deps/v8/tools/testrunner/objects/testcase.py @@ -29,14 +29,14 @@ from . import output class TestCase(object): - def __init__(self, suite, path, variant='default', flags=None, + def __init__(self, suite, path, variant=None, flags=None, override_shell=None): self.suite = suite # TestSuite object self.path = path # string, e.g. 'div-mod', 'test-api/foo' self.flags = flags or [] # list of strings, flags specific to this test self.variant = variant # name of the used testing variant self.override_shell = override_shell - self.outcomes = set([]) + self.outcomes = frozenset([]) self.output = None self.id = None # int, used to map result back to TestCase instance self.duration = None # assigned during execution @@ -63,7 +63,7 @@ def UnpackTask(task): """Creates a new TestCase object based on packed task data.""" # For the order of the fields, refer to PackTask() above. test = TestCase(str(task[0]), task[1], task[2], task[3], task[4]) - test.outcomes = set(task[5]) + test.outcomes = frozenset(task[5]) test.id = task[6] test.run = 1 return test @@ -108,3 +108,6 @@ def __cmp__(self, other): (self.suite.name, self.path, self.flags), (other.suite.name, other.path, other.flags), ) + + def __str__(self): + return "[%s/%s %s]" % (self.suite.name, self.path, self.flags) diff --git a/deps/v8/tools/testrunner/server/main.py b/deps/v8/tools/testrunner/server/main.py index 1000713ca93d4c..c237e1adb4c89e 100644 --- a/deps/v8/tools/testrunner/server/main.py +++ b/deps/v8/tools/testrunner/server/main.py @@ -221,7 +221,7 @@ def AcceptNewTrusted(self, data): if not self.IsTrusted(signer): return if self.IsTrusted(fingerprint): - return # Already trust this guy. 
+ return # Already trusted. filename = self._PubkeyFilename(fingerprint) signer_pubkeyfile = self._PubkeyFilename(signer) if not signatures.VerifySignature(filename, pubkey, signature, diff --git a/deps/v8/tools/testrunner/utils/dump_build_config.py b/deps/v8/tools/testrunner/utils/dump_build_config.py new file mode 100644 index 00000000000000..bd57b5f34e2302 --- /dev/null +++ b/deps/v8/tools/testrunner/utils/dump_build_config.py @@ -0,0 +1,26 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Writes a dictionary to a json file with the passed key-value pairs. + +Expected to be called like: +dump_build_config.py path/to/file.json [key1=value1 ...] + +The values are expected to be valid json. E.g. true is a boolean and "true" is +the string "true". +""" + +import json +import os +import sys + +assert len(sys.argv) > 1 + +def as_json(kv): + assert '=' in kv + k, v = kv.split('=', 1) + return k, json.loads(v) + +with open(sys.argv[1], 'w') as f: + json.dump(dict(as_json(kv) for kv in sys.argv[2:]), f) diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js index 3f2321fed19d63..be374c9b184040 100644 --- a/deps/v8/tools/tickprocessor-driver.js +++ b/deps/v8/tools/tickprocessor-driver.js @@ -72,6 +72,7 @@ var tickProcessor = new TickProcessor( sourceMap, params.timedRange, params.pairwiseTimedRange, - params.onlySummary); + params.onlySummary, + params.runtimeTimerFilter); tickProcessor.processLogFile(params.logFileName); tickProcessor.printStatistics(); diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js index ba7401a2236dd3..ec56d49d9046aa 100644 --- a/deps/v8/tools/tickprocessor.js +++ b/deps/v8/tools/tickprocessor.js @@ -81,9 +81,10 @@ function TickProcessor( sourceMap, timedRange, pairwiseTimedRange, - onlySummary) { + onlySummary, + runtimeTimerFilter) { LogReader.call(this, { - 
'shared-library': { parsers: [null, parseInt, parseInt], + 'shared-library': { parsers: [null, parseInt, parseInt, parseInt], processor: this.processSharedLibrary }, 'code-creation': { parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'], @@ -94,6 +95,9 @@ function TickProcessor( processor: this.processCodeDelete }, 'sfi-move': { parsers: [parseInt, parseInt], processor: this.processFunctionMove }, + 'active-runtime-timer': { + parsers: [null], + processor: this.processRuntimeTimerEvent }, 'tick': { parsers: [parseInt, parseInt, parseInt, parseInt, parseInt, 'var-args'], @@ -124,6 +128,7 @@ function TickProcessor( this.callGraphSize_ = callGraphSize; this.ignoreUnknown_ = ignoreUnknown; this.stateFilter_ = stateFilter; + this.runtimeTimerFilter_ = runtimeTimerFilter; this.sourceMap = sourceMap; this.deserializedEntriesNames_ = []; var ticks = this.ticks_ = @@ -242,13 +247,13 @@ TickProcessor.prototype.processLogFileInTest = function(fileName) { TickProcessor.prototype.processSharedLibrary = function( - name, startAddr, endAddr) { - var entry = this.profile_.addLibrary(name, startAddr, endAddr); + name, startAddr, endAddr, aslrSlide) { + var entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide); this.setCodeType(entry.getName(), 'SHARED_LIB'); var self = this; var libFuncs = this.cppEntriesProvider_.parseVmSymbols( - name, startAddr, endAddr, function(fName, fStart, fEnd) { + name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) { self.profile_.addStaticCode(fName, fStart, fEnd); self.setCodeType(fName, 'CPP'); }); @@ -284,9 +289,18 @@ TickProcessor.prototype.processFunctionMove = function(from, to) { TickProcessor.prototype.includeTick = function(vmState) { - return this.stateFilter_ == null || this.stateFilter_ == vmState; + if (this.stateFilter_ !== null) { + return this.stateFilter_ == vmState; + } else if (this.runtimeTimerFilter_ !== null) { + return this.currentRuntimeTimer == this.runtimeTimerFilter_; + } + return 
true; }; +TickProcessor.prototype.processRuntimeTimerEvent = function(name) { + this.currentRuntimeTimer = name; +} + TickProcessor.prototype.processTick = function(pc, ns_since_start, is_external_callback, @@ -559,7 +573,7 @@ function CppEntriesProvider() { CppEntriesProvider.prototype.parseVmSymbols = function( - libName, libStart, libEnd, processorFunc) { + libName, libStart, libEnd, libASLRSlide, processorFunc) { this.loadSymbols(libName); var prevEntry; @@ -588,6 +602,7 @@ CppEntriesProvider.prototype.parseVmSymbols = function( } else if (funcInfo === false) { break; } + funcInfo.start += libASLRSlide; if (funcInfo.start < libStart && funcInfo.start < libEnd - libStart) { funcInfo.start += libStart; } @@ -780,6 +795,8 @@ function ArgumentsProcessor(args) { 'Show only ticks from OTHER VM state'], '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL, 'Show only ticks from EXTERNAL VM state'], + '--filter-runtime-timer': ['runtimeTimerFilter', null, + 'Show only ticks matching the given runtime timer scope'], '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE, 'Set the call graph size'], '--ignore-unknown': ['ignoreUnknown', true, @@ -831,7 +848,8 @@ ArgumentsProcessor.DEFAULTS = { distortion: 0, timedRange: false, pairwiseTimedRange: false, - onlySummary: false + onlySummary: false, + runtimeTimerFilter: null, }; diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py index fbd4036dadb49c..05e240edb5b931 100755 --- a/deps/v8/tools/try_perf.py +++ b/deps/v8/tools/try_perf.py @@ -33,17 +33,24 @@ 'emscripten', 'compile', 'jetstream', + 'jetstream-ignition', 'jsbench', 'jstests', 'kraken_orig', + 'kraken_orig-ignition', 'massive', 'memory', 'octane', + 'octane-noopt', + 'octane-ignition', 'octane-pr', 'octane-tf', 'octane-tf-pr', 'simdjs', 'sunspider', + 'sunspider-ignition', + 'unity', + 'wasm', ] V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) diff --git a/deps/v8/tools/turbolizer-perf.py 
b/deps/v8/tools/turbolizer-perf.py new file mode 100644 index 00000000000000..c90a1174d48769 --- /dev/null +++ b/deps/v8/tools/turbolizer-perf.py @@ -0,0 +1,56 @@ +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import os +import sys +import json +import re +import argparse + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * + +def trace_begin(): + json_obj['eventCounts'] = {} + prog = re.compile(r'0x[0-9a-fA-F]+') + for phase in reversed(json_obj['phases']): + if phase['name'] == "disassembly": + for line in phase['data'].splitlines(): + result = re.match(prog, line) + if result: + known_addrs.add(result.group(0)) + +def trace_end(): + print json.dumps(json_obj) + +def process_event(param_dict): + addr = "0x%x" % int(param_dict['sample']['ip']) + + # Only count samples that belong to the function + if addr not in known_addrs: + return + + ev_name = param_dict['ev_name'] + if ev_name not in json_obj['eventCounts']: + json_obj['eventCounts'][ev_name] = {} + if addr not in json_obj['eventCounts'][ev_name]: + json_obj['eventCounts'][ev_name][addr] = 0 + json_obj['eventCounts'][ev_name][addr] += 1 + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Perf script to merge profiling data with turbofan compiler " + "traces.") + parser.add_argument("file_name", metavar="JSON File", + help="turbo trace json file.") + + args = parser.parse_args() + + with open(args.file_name, 'r') as json_file: + json_obj = json.load(json_file) + + known_addrs = set() diff --git a/deps/v8/tools/turbolizer/OWNERS b/deps/v8/tools/turbolizer/OWNERS new file mode 100644 index 00000000000000..fc52961eff7c55 --- /dev/null +++ b/deps/v8/tools/turbolizer/OWNERS @@ -0,0 +1 @@ +danno@chromium.org diff --git a/deps/v8/tools/turbolizer/README.md 
b/deps/v8/tools/turbolizer/README.md new file mode 100644 index 00000000000000..54e1051690ff2f --- /dev/null +++ b/deps/v8/tools/turbolizer/README.md @@ -0,0 +1,62 @@ +Turbolizer +========== + +Turbolizer is an HTML-based tool that visualizes optimized code along the various +phases of Turbofan's optimization pipeline, allowing easy navigation between +source code, Turbofan IR graphs, scheduled IR nodes and generated assembly code. + +Turbolizer consumes .json files that are generated per-function by d8 by passing +the '--trace-turbo' command-line flag. + +Host the turbolizer locally by starting a web server that serves the contents of +the turbolizer directory, e.g.: + + cd src/tools/turbolizer + python -m SimpleHTTPServer 8000 + +Optionally, profiling data generated by the perf tools in linux can be merged +with the .json files using the turbolizer-perf.py file included. The following +command is an example of using the perf script: + + perf script -i perf.data.jitted -s turbolizer-perf.py turbo-main.json + +The output of the above command is a json object that can be piped to a file +which, when uploaded to turbolizer, will display the event counts from perf next +to each instruction in the disassembly. Further detail can be found at the +bottom of this document under "Using Perf with Turbo." + +Using the python interface in perf script requires python-dev to be installed +and perf be recompiled with python support enabled. Once recompiled, the +variable PERF_EXEC_PATH must be set to the location of the recompiled perf +binaries. + +Graph visualization and manipulation based on Mike Bostock's sample code for an +interactive tool for creating directed graphs. Original source is at +https://github.com/metacademy/directed-graph-creator and released under the +MIT/X license. + +Icons derived from the "White Olive Collection" created by Breezi, released under +the Creative Commons BY license.
+ +Using Perf with Turbo +--------------------- + +In order to generate perf data that matches exactly with the turbofan trace, you +must use either a debug build of v8 or a release build with the flag +'disassembler=on'. This flag ensures that the '--trace-turbo' will output the +necessary disassembly for linking with the perf profile. + +The basic example of generating the required data is as follows: + + perf record -k mono /path/to/d8 --turbo --trace-turbo --perf-prof main.js + perf inject -j -i perf.data -o perf.data.jitted + perf script -i perf.data.jitted -s turbolizer-perf.py turbo-main.json + +These commands combined will run and profile d8, merge the output into a single +'perf.data.jitted' file, then take the event data from that and link them to the +disassembly in the 'turbo-main.json'. Note that, as above, the output of the +script command must be piped to a file for uploading to turbolizer. + +There are many options that can be added to the first command, for example '-e' +can be used to specify the counting of specific events (default: cycles), as +well as '--cpu' to specify which CPU to sample. \ No newline at end of file diff --git a/deps/v8/tools/turbolizer/code-view.js b/deps/v8/tools/turbolizer/code-view.js new file mode 100644 index 00000000000000..6abb05593ef5ed --- /dev/null +++ b/deps/v8/tools/turbolizer/code-view.js @@ -0,0 +1,172 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class CodeView extends View { + constructor(divID, PR, sourceText, sourcePosition, broker) { + super(divID, broker, null, false); + let view = this; + view.PR = PR; + view.mouseDown = false; + view.broker = broker; + view.allSpans = []; + + var selectionHandler = { + clear: function() { broker.clear(selectionHandler); }, + select: function(items, selected) { + var handler = this; + var broker = view.broker; + for (let span of items) { + if (selected) { + span.classList.add("selected"); + } else { + span.classList.remove("selected"); + } + } + var locations = []; + for (var span of items) { + locations.push({pos_start: span.start, pos_end: span.end}); + } + broker.clear(selectionHandler); + broker.select(selectionHandler, locations, selected); + }, + selectionDifference: function(span1, inclusive1, span2, inclusive2) { + var pos1 = span1.start; + var pos2 = span2.start; + var result = []; + var lineListDiv = view.divNode.firstChild.firstChild.childNodes; + for (var i = 0; i < lineListDiv.length; i++) { + var currentLineElement = lineListDiv[i]; + var spans = currentLineElement.childNodes; + for (var j = 0; j < spans.length; ++j) { + var currentSpan = spans[j]; + if (currentSpan.start > pos1 || + (inclusive1 && currentSpan.start == pos1)) { + if (currentSpan.start < pos2 || + (inclusive2 && currentSpan.start == pos2)) { + result.push(currentSpan); + } + } + } + } + return result; + }, + brokeredSelect: function(locations, selected) { + let firstSelect = view.selection.isEmpty(); + for (let location of locations) { + let start = location.pos_start; + let end = location.pos_end; + if (start && end) { + let lower = 0; + let upper = view.allSpans.length; + if (upper > 0) { + while ((upper - lower) > 1) { + var middle = Math.floor((upper + lower) / 2); + var lineStart = view.allSpans[middle].start; + if (lineStart < start) { + lower = middle; + } else if (lineStart > start) { + upper = middle; + } else { + lower = middle; + break; + } + } + var 
currentSpan = view.allSpans[lower]; + var currentLineElement = currentSpan.parentNode; + if ((currentSpan.start <= start && start < currentSpan.end) || + (currentSpan.start <= end && end < currentSpan.end)) { + if (firstSelect) { + makeContainerPosVisible( + view.divNode, currentLineElement.offsetTop); + firstSelect = false; + } + view.selection.select(currentSpan, selected); + } + } + } + } + }, + brokeredClear: function() { view.selection.clear(); }, + }; + view.selection = new Selection(selectionHandler); + broker.addSelectionHandler(selectionHandler); + + view.handleSpanMouseDown = function(e) { + e.stopPropagation(); + if (!e.shiftKey) { + view.selection.clear(); + } + view.selection.select(this, true); + view.mouseDown = true; + } + + view.handleSpanMouseMove = function(e) { + if (view.mouseDown) { + view.selection.extendTo(this); + } + } + + view.handleCodeMouseDown = function(e) { view.selection.clear(); } + + document.addEventListener('mouseup', function(e) { + view.mouseDown = false; + }, false); + + view.initializeCode(sourceText, sourcePosition); + } + + initializeContent(data, rememberedSelection) { this.data = data; } + + initializeCode(sourceText, sourcePosition) { + var view = this; + if (sourceText == "") { + var newHtml = "
";
+      view.divNode.innerHTML = newHtml;
+    } else {
+      var newHtml =
+          "
" + sourceText + "
"; + view.divNode.innerHTML = newHtml; + try { + // Wrap in try to work when offline. + view.PR.prettyPrint(); + } catch (e) { + } + + view.divNode.onmousedown = this.handleCodeMouseDown; + + var base = sourcePosition; + var current = 0; + var lineListDiv = view.divNode.firstChild.firstChild.childNodes; + for (let i = 0; i < lineListDiv.length; i++) { + var currentLineElement = lineListDiv[i]; + currentLineElement.id = "li" + i; + var pos = base + current; + currentLineElement.pos = pos; + var spans = currentLineElement.childNodes; + for (let j = 0; j < spans.length; ++j) { + var currentSpan = spans[j]; + if (currentSpan.nodeType == 1) { + currentSpan.start = pos; + currentSpan.end = pos + currentSpan.textContent.length; + currentSpan.onmousedown = this.handleSpanMouseDown; + currentSpan.onmousemove = this.handleSpanMouseMove; + view.allSpans.push(currentSpan); + } + current += currentSpan.textContent.length; + pos = base + current; + } + while ((current < sourceText.length) && + (sourceText[current] == '\n' || sourceText[current] == '\r')) { + ++current; + } + } + } + + view.resizeToParent(); + } + + deleteContent() {} +} diff --git a/deps/v8/tools/turbolizer/constants.js b/deps/v8/tools/turbolizer/constants.js new file mode 100644 index 00000000000000..da92c45abc9033 --- /dev/null +++ b/deps/v8/tools/turbolizer/constants.js @@ -0,0 +1,30 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +var MAX_RANK_SENTINEL = 0; +var GRAPH_MARGIN = 250; +var WIDTH = 'width'; +var HEIGHT = 'height'; +var VISIBILITY = 'visibility'; +var SOURCE_PANE_ID = 'left'; +var SOURCE_COLLAPSE_ID = 'source-shrink'; +var SOURCE_EXPAND_ID = 'source-expand'; +var INTERMEDIATE_PANE_ID = 'middle'; +var EMPTY_PANE_ID = 'empty'; +var GRAPH_PANE_ID = 'graph'; +var SCHEDULE_PANE_ID = 'schedule'; +var GENERATED_PANE_ID = 'right'; +var DISASSEMBLY_PANE_ID = 'disassembly'; +var DISASSEMBLY_COLLAPSE_ID = 'disassembly-shrink'; +var DISASSEMBLY_EXPAND_ID = 'disassembly-expand'; +var COLLAPSE_PANE_BUTTON_VISIBLE = 'button-input'; +var COLLAPSE_PANE_BUTTON_INVISIBLE = 'button-input-invisible'; +var UNICODE_BLOCK = '▋'; +var PROF_COLS = [ + { perc: 0, col: { r: 255, g: 255, b: 255 } }, + { perc: 0.5, col: { r: 255, g: 255, b: 128 } }, + { perc: 5, col: { r: 255, g: 128, b: 0 } }, + { perc: 15, col: { r: 255, g: 0, b: 0 } }, + { perc: 100, col: { r: 0, g: 0, b: 0 } } +]; diff --git a/deps/v8/tools/turbolizer/disassembly-view.js b/deps/v8/tools/turbolizer/disassembly-view.js new file mode 100644 index 00000000000000..a2a534cd7f22bc --- /dev/null +++ b/deps/v8/tools/turbolizer/disassembly-view.js @@ -0,0 +1,273 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class DisassemblyView extends TextView { + constructor(id, broker) { + super(id, broker, null, false); + + let view = this; + let ADDRESS_STYLE = { + css: 'tag', + location: function(text) { + ADDRESS_STYLE.last_address = text; + return undefined; + } + }; + let ADDRESS_LINK_STYLE = { + css: 'tag', + link: function(text) { + view.select(function(location) { return location.address == text; }, true, true); + } + }; + let UNCLASSIFIED_STYLE = { + css: 'com' + }; + let NUMBER_STYLE = { + css: 'lit' + }; + let COMMENT_STYLE = { + css: 'com' + }; + let POSITION_STYLE = { + css: 'com', + location: function(text) { + view.pos_start = Number(text); + } + }; + let OPCODE_STYLE = { + css: 'kwd', + location: function(text) { + if (BLOCK_HEADER_STYLE.block_id != undefined) { + return { + address: ADDRESS_STYLE.last_address, + block_id: BLOCK_HEADER_STYLE.block_id + }; + } else { + return { + address: ADDRESS_STYLE.last_address + }; + } + } + }; + const BLOCK_HEADER_STYLE = { + css: 'com', + block_id: -1, + location: function(text) { + let matches = /\d+/.exec(text); + if (!matches) return undefined; + BLOCK_HEADER_STYLE.block_id = Number(matches[0]); + return { + block_id: BLOCK_HEADER_STYLE.block_id + }; + }, + }; + const SOURCE_POSITION_HEADER_STYLE = { + css: 'com', + location: function(text) { + let matches = /(\d+):(\d+)/.exec(text); + if (!matches) return undefined; + let li = Number(matches[1]); + if (view.pos_lines === null) return undefined; + let pos = view.pos_lines[li-1] + Number(matches[2]); + return { + pos_start: pos, + pos_end: pos + 1 + }; + }, + }; + view.SOURCE_POSITION_HEADER_REGEX = /^(\s*-- .+:)(\d+:\d+)( --)/; + let patterns = [ + [ + [/^0x[0-9a-f]{8,16}/, ADDRESS_STYLE, 1], + [view.SOURCE_POSITION_HEADER_REGEX, SOURCE_POSITION_HEADER_STYLE, -1], + [/^\s+-- B\d+ start.*/, BLOCK_HEADER_STYLE, -1], + [/^.*/, UNCLASSIFIED_STYLE, -1] + ], + [ + [/^\s+\d+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2], + [/^.*/, null, -1] + ], + [ + [/^\S+\s+/, 
OPCODE_STYLE, 3], + [/^\S+$/, OPCODE_STYLE, -1], + [/^.*/, null, -1] + ], + [ + [/^\s+/, null], + [/^[^\(;]+$/, null, -1], + [/^[^\(;]+/, null], + [/^\(/, null, 4], + [/^;/, COMMENT_STYLE, 5] + ], + [ + [/^0x[0-9a-f]{8,16}/, ADDRESS_LINK_STYLE], + [/^[^\)]/, null], + [/^\)$/, null, -1], + [/^\)/, null, 3] + ], + [ + [/^; debug\: position /, COMMENT_STYLE, 6], + [/^.+$/, COMMENT_STYLE, -1] + ], + [ + [/^\d+$/, POSITION_STYLE, -1], + ] + ]; + view.setPatterns(patterns); + } + + lineLocation(li) { + let view = this; + let result = undefined; + for (let i = 0; i < li.children.length; ++i) { + let fragment = li.children[i]; + let location = fragment.location; + if (location != null) { + if (location.block_id != undefined) { + if (result === undefined) result = {}; + result.block_id = location.block_id; + } + if (location.address != undefined) { + if (result === undefined) result = {}; + result.address = location.address; + } + if (location.pos_start != undefined && location.pos_end != undefined) { + if (result === undefined) result = {}; + result.pos_start = location.pos_start; + result.pos_end = location.pos_end; + } + else if (view.pos_start != -1) { + if (result === undefined) result = {}; + result.pos_start = view.pos_start; + result.pos_end = result.pos_start + 1; + } + } + } + return result; + } + + initializeContent(data, rememberedSelection) { + this.data = data; + super.initializeContent(data, rememberedSelection); + } + + initializeCode(sourceText, sourcePosition) { + let view = this; + view.pos_start = -1; + view.addr_event_counts = null; + view.total_event_counts = null; + view.max_event_counts = null; + view.pos_lines = new Array(); + // Comment lines for line 0 include sourcePosition already, only need to + // add sourcePosition for lines > 0. 
+ view.pos_lines[0] = sourcePosition; + if (sourceText != "") { + let base = sourcePosition; + let current = 0; + let source_lines = sourceText.split("\n"); + for (let i = 1; i < source_lines.length; i++) { + // Add 1 for newline character that is split off. + current += source_lines[i-1].length + 1; + view.pos_lines[i] = base + current; + } + } + } + + initializePerfProfile(eventCounts) { + let view = this; + if (eventCounts !== undefined) { + view.addr_event_counts = eventCounts; + + view.total_event_counts = {}; + view.max_event_counts = {}; + for (let ev_name in view.addr_event_counts) { + let keys = Object.keys(view.addr_event_counts[ev_name]); + let values = keys.map(key => view.addr_event_counts[ev_name][key]); + view.total_event_counts[ev_name] = values.reduce((a, b) => a + b); + view.max_event_counts[ev_name] = values.reduce((a, b) => Math.max(a, b)); + } + } + else { + view.addr_event_counts = null; + view.total_event_counts = null; + view.max_event_counts = null; + } + } + + // Shorten decimals and remove trailing zeroes for readability. + humanize(num) { + return num.toFixed(3).replace(/\.?0+$/, "") + "%"; + } + + // Interpolate between the given start and end values by a fraction of val/max. + interpolate(val, max, start, end) { + return start + (end - start) * (val / max); + } + + processLine(line) { + let view = this; + let func = function(match, p1, p2, p3) { + let nums = p2.split(":"); + let li = Number(nums[0]); + let pos = Number(nums[1]); + if(li === 0) + pos -= view.pos_lines[0]; + li++; + return p1 + li + ":" + pos + p3; + }; + line = line.replace(view.SOURCE_POSITION_HEADER_REGEX, func); + let fragments = super.processLine(line); + + // Add profiling data per instruction if available. 
+ if (view.total_event_counts) { + let matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line); + if (matches) { + let newFragments = []; + for (let event in view.addr_event_counts) { + let count = view.addr_event_counts[event][matches[1]]; + let str = " "; + let css_cls = "prof"; + if(count !== undefined) { + let perc = count / view.total_event_counts[event] * 100; + + let col = { r: 255, g: 255, b: 255 }; + for (let i = 0; i < PROF_COLS.length; i++) { + if (perc === PROF_COLS[i].perc) { + col = PROF_COLS[i].col; + break; + } + else if (perc > PROF_COLS[i].perc && perc < PROF_COLS[i + 1].perc) { + let col1 = PROF_COLS[i].col; + let col2 = PROF_COLS[i + 1].col; + + let val = perc - PROF_COLS[i].perc; + let max = PROF_COLS[i + 1].perc - PROF_COLS[i].perc; + + col.r = Math.round(view.interpolate(val, max, col1.r, col2.r)); + col.g = Math.round(view.interpolate(val, max, col1.g, col2.g)); + col.b = Math.round(view.interpolate(val, max, col1.b, col2.b)); + break; + } + } + + str = UNICODE_BLOCK; + + let fragment = view.createFragment(str, css_cls); + fragment.title = event + ": " + view.humanize(perc) + " (" + count + ")"; + fragment.style.color = "rgb(" + col.r + ", " + col.g + ", " + col.b + ")"; + + newFragments.push(fragment); + } + else + newFragments.push(view.createFragment(str, css_cls)); + + } + fragments = newFragments.concat(fragments); + } + } + return fragments; + } +} diff --git a/deps/v8/tools/turbolizer/edge.js b/deps/v8/tools/turbolizer/edge.js new file mode 100644 index 00000000000000..c0f63a0204ffc0 --- /dev/null +++ b/deps/v8/tools/turbolizer/edge.js @@ -0,0 +1,79 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +var MINIMUM_EDGE_SEPARATION = 20; + +function isEdgeInitiallyVisible(target, index, source, type) { + return type == "control" && (target.cfg || source.cfg); +} + +var Edge = function(target, index, source, type) { + this.target = target; + this.source = source; + this.index = index; + this.type = type; + this.backEdgeNumber = 0; + this.visible = isEdgeInitiallyVisible(target, index, source, type); +}; + +Edge.prototype.stringID = function() { + return this.source.id + "," + this.index + "," + this.target.id; +}; + +Edge.prototype.isVisible = function() { + return this.visible && this.source.visible && this.target.visible; +}; + +Edge.prototype.getInputHorizontalPosition = function(graph) { + if (this.backEdgeNumber > 0) { + return graph.maxGraphNodeX + this.backEdgeNumber * MINIMUM_EDGE_SEPARATION; + } + var source = this.source; + var target = this.target; + var index = this.index; + var input_x = target.x + target.getInputX(index); + var inputApproach = target.getInputApproach(this.index); + var outputApproach = source.getOutputApproach(graph); + if (inputApproach > outputApproach) { + return input_x; + } else { + var inputOffset = MINIMUM_EDGE_SEPARATION * (index + 1); + return (target.x < source.x) + ? 
(target.x + target.getTotalNodeWidth() + inputOffset) + : (target.x - inputOffset) + } +} + +Edge.prototype.generatePath = function(graph) { + var target = this.target; + var source = this.source; + var input_x = target.x + target.getInputX(this.index); + var arrowheadHeight = 7; + var input_y = target.y - 2 * DEFAULT_NODE_BUBBLE_RADIUS - arrowheadHeight; + var output_x = source.x + source.getOutputX(); + var output_y = source.y + graph.getNodeHeight(source) + DEFAULT_NODE_BUBBLE_RADIUS; + var inputApproach = target.getInputApproach(this.index); + var outputApproach = source.getOutputApproach(graph); + var horizontalPos = this.getInputHorizontalPosition(graph); + + var result = "M" + output_x + "," + output_y + + "L" + output_x + "," + outputApproach + + "L" + horizontalPos + "," + outputApproach; + + if (horizontalPos != input_x) { + result += "L" + horizontalPos + "," + inputApproach; + } else { + if (inputApproach < outputApproach) { + inputApproach = outputApproach; + } + } + + result += "L" + input_x + "," + inputApproach + + "L" + input_x + "," + input_y; + return result; +} + +Edge.prototype.isBackEdge = function() { + return this.target.hasBackEdges() && (this.target.rank < this.source.rank); +} diff --git a/deps/v8/tools/turbolizer/empty-view.js b/deps/v8/tools/turbolizer/empty-view.js new file mode 100644 index 00000000000000..66caf59d8f66a7 --- /dev/null +++ b/deps/v8/tools/turbolizer/empty-view.js @@ -0,0 +1,19 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class EmptyView extends View { + constructor(id, broker) { + super(id, broker); + this.svg = this.divElement.append("svg").attr('version','1.1').attr("width", "100%"); + } + + initializeContent(data, rememberedSelection) { + this.svg.attr("height", document.documentElement.clientHeight + "px"); + } + + deleteContent() { + } +} diff --git a/deps/v8/tools/turbolizer/expand-all.jpg b/deps/v8/tools/turbolizer/expand-all.jpg new file mode 100644 index 00000000000000..df64a2c4aad387 Binary files /dev/null and b/deps/v8/tools/turbolizer/expand-all.jpg differ diff --git a/deps/v8/tools/turbolizer/graph-layout.js b/deps/v8/tools/turbolizer/graph-layout.js new file mode 100644 index 00000000000000..e9b44b4d2cdf26 --- /dev/null +++ b/deps/v8/tools/turbolizer/graph-layout.js @@ -0,0 +1,493 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +var DEFAULT_NODE_ROW_SEPARATION = 130 + +var traceLayout = false; + +function newGraphOccupation(graph){ + var isSlotFilled = []; + var maxSlot = 0; + var minSlot = 0; + var nodeOccupation = []; + + function slotToIndex(slot) { + if (slot >= 0) { + return slot * 2; + } else { + return slot * 2 + 1; + } + } + + function indexToSlot(index) { + if ((index % 0) == 0) { + return index / 2; + } else { + return -((index - 1) / 2); + } + } + + function positionToSlot(pos) { + return Math.floor(pos / NODE_INPUT_WIDTH); + } + + function slotToLeftPosition(slot) { + return slot * NODE_INPUT_WIDTH + } + + function slotToRightPosition(slot) { + return (slot + 1) * NODE_INPUT_WIDTH + } + + function findSpace(pos, width, direction) { + var widthSlots = Math.floor((width + NODE_INPUT_WIDTH - 1) / + NODE_INPUT_WIDTH); + var currentSlot = positionToSlot(pos + width / 2); + var currentScanSlot = currentSlot; + var widthSlotsRemainingLeft = widthSlots; + var widthSlotsRemainingRight = widthSlots; + var slotsChecked = 
0; + while (true) { + var mod = slotsChecked++ % 2; + currentScanSlot = currentSlot + (mod ? -1 : 1) * (slotsChecked >> 1); + if (!isSlotFilled[slotToIndex(currentScanSlot)]) { + if (mod) { + if (direction <= 0) --widthSlotsRemainingLeft + } else { + if (direction >= 0) --widthSlotsRemainingRight + } + if (widthSlotsRemainingLeft == 0 || + widthSlotsRemainingRight == 0 || + (widthSlotsRemainingLeft + widthSlotsRemainingRight) == widthSlots && + (widthSlots == slotsChecked)) { + if (mod) { + return [currentScanSlot, widthSlots]; + } else { + return [currentScanSlot - widthSlots + 1, widthSlots]; + } + } + } else { + if (mod) { + widthSlotsRemainingLeft = widthSlots; + } else { + widthSlotsRemainingRight = widthSlots; + } + } + } + } + + function setIndexRange(from, to, value) { + if (to < from) { + throw("illegal slot range"); + } + while (from <= to) { + if (from > maxSlot) { + maxSlot = from; + } + if (from < minSlot) { + minSlot = from; + } + isSlotFilled[slotToIndex(from++)] = value; + } + } + + function occupySlotRange(from, to) { + if (traceLayout) { + console.log("Occupied [" + slotToLeftPosition(from) + " " + slotToLeftPosition(to + 1) + ")"); + } + setIndexRange(from, to, true); + } + + function clearSlotRange(from, to) { + if (traceLayout) { + console.log("Cleared [" + slotToLeftPosition(from) + " " + slotToLeftPosition(to + 1) + ")"); + } + setIndexRange(from, to, false); + } + + function occupyPositionRange(from, to) { + occupySlotRange(positionToSlot(from), positionToSlot(to - 1)); + } + + function clearPositionRange(from, to) { + clearSlotRange(positionToSlot(from), positionToSlot(to - 1)); + } + + function occupyPositionRangeWithMargin(from, to, margin) { + var fromMargin = from - Math.floor(margin); + var toMargin = to + Math.floor(margin); + occupyPositionRange(fromMargin, toMargin); + } + + function clearPositionRangeWithMargin(from, to, margin) { + var fromMargin = from - Math.floor(margin); + var toMargin = to + Math.floor(margin); + 
clearPositionRange(fromMargin, toMargin); + } + + var occupation = { + occupyNodeInputs: function(node) { + for (var i = 0; i < node.inputs.length; ++i) { + if (node.inputs[i].isVisible()) { + var edge = node.inputs[i]; + if (!edge.isBackEdge()) { + var source = edge.source; + var horizontalPos = edge.getInputHorizontalPosition(graph); + if (traceLayout) { + console.log("Occupying input " + i + " of " + node.id + " at " + horizontalPos); + } + occupyPositionRangeWithMargin(horizontalPos, + horizontalPos, + NODE_INPUT_WIDTH / 2); + } + } + } + }, + occupyNode: function(node) { + var getPlacementHint = function(n) { + var pos = 0; + var direction = -1; + var outputEdges = 0; + var inputEdges = 0; + for (var k = 0; k < n.outputs.length; ++k) { + var outputEdge = n.outputs[k]; + if (outputEdge.isVisible()) { + var output = n.outputs[k].target; + for (var l = 0; l < output.inputs.length; ++l) { + if (output.rank > n.rank) { + var inputEdge = output.inputs[l]; + if (inputEdge.isVisible()) { + ++inputEdges; + } + if (output.inputs[l].source == n) { + pos += output.x + output.getInputX(l) + NODE_INPUT_WIDTH / 2; + outputEdges++; + if (l >= (output.inputs.length / 2)) { + direction = 1; + } + } + } + } + } + } + if (outputEdges != 0) { + pos = pos / outputEdges; + } + if (outputEdges > 1 || inputEdges == 1) { + direction = 0; + } + return [direction, pos]; + } + var width = node.getTotalNodeWidth(); + var margin = MINIMUM_EDGE_SEPARATION; + var paddedWidth = width + 2 * margin; + var placementHint = getPlacementHint(node); + var x = placementHint[1] - paddedWidth + margin; + if (traceLayout) { + console.log("Node " + node.id + " placement hint [" + x + ", " + (x + paddedWidth) + ")"); + } + var placement = findSpace(x, paddedWidth, placementHint[0]); + var firstSlot = placement[0]; + var slotWidth = placement[1]; + var endSlotExclusive = firstSlot + slotWidth - 1; + occupySlotRange(firstSlot, endSlotExclusive); + nodeOccupation.push([firstSlot, endSlotExclusive]); + if 
(placementHint[0] < 0) { + return slotToLeftPosition(firstSlot + slotWidth) - width - margin; + } else if (placementHint[0] > 0) { + return slotToLeftPosition(firstSlot) + margin; + } else { + return slotToLeftPosition(firstSlot + slotWidth / 2) - (width / 2); + } + }, + clearOccupiedNodes: function() { + nodeOccupation.forEach(function(o) { + clearSlotRange(o[0], o[1]); + }); + nodeOccupation = []; + }, + clearNodeOutputs: function(source) { + source.outputs.forEach(function(edge) { + if (edge.isVisible()) { + var target = edge.target; + for (var i = 0; i < target.inputs.length; ++i) { + if (target.inputs[i].source === source) { + var horizontalPos = edge.getInputHorizontalPosition(graph); + clearPositionRangeWithMargin(horizontalPos, + horizontalPos, + NODE_INPUT_WIDTH / 2); + } + } + } + }); + }, + print: function() { + var s = ""; + for (var currentSlot = -40; currentSlot < 40; ++currentSlot) { + if (currentSlot != 0) { + s += " "; + } else { + s += "|"; + } + } + console.log(s); + s = ""; + for (var currentSlot2 = -40; currentSlot2 < 40; ++currentSlot2) { + if (isSlotFilled[slotToIndex(currentSlot2)]) { + s += "*"; + } else { + s += " "; + } + } + console.log(s); + } + } + return occupation; +} + +function layoutNodeGraph(graph) { + // First determine the set of nodes that have no outputs. Those are the + // basis for bottom-up DFS to determine rank and node placement. + var endNodesHasNoOutputs = []; + var startNodesHasNoInputs = []; + graph.nodes.forEach(function(n, i){ + endNodesHasNoOutputs[n.id] = true; + startNodesHasNoInputs[n.id] = true; + }); + graph.edges.forEach(function(e, i){ + endNodesHasNoOutputs[e.source.id] = false; + startNodesHasNoInputs[e.target.id] = false; + }); + + // Finialize the list of start and end nodes. 
+ var endNodes = []; + var startNodes = []; + var visited = []; + var rank = []; + graph.nodes.forEach(function(n, i){ + if (endNodesHasNoOutputs[n.id]) { + endNodes.push(n); + } + if (startNodesHasNoInputs[n.id]) { + startNodes.push(n); + } + visited[n.id] = false; + rank[n.id] = -1; + n.rank = 0; + n.visitOrderWithinRank = 0; + n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH; + }); + + + var maxRank = 0; + var visited = []; + var dfsStack = []; + var visitOrderWithinRank = 0; + + var worklist = startNodes.slice(); + while (worklist.length != 0) { + var n = worklist.pop(); + var changed = false; + if (n.rank == MAX_RANK_SENTINEL) { + n.rank = 1; + changed = true; + } + var begin = 0; + var end = n.inputs.length; + if (n.opcode == 'Phi' || n.opcode == 'EffectPhi') { + // Keep with merge or loop node + begin = n.inputs.length - 1; + } else if (n.hasBackEdges()) { + end = 1; + } + for (var l = begin; l < end; ++l) { + var input = n.inputs[l].source; + if (input.visible && input.rank >= n.rank) { + n.rank = input.rank + 1; + changed = true; + } + } + if (changed) { + var hasBackEdges = n.hasBackEdges(); + for (var l = n.outputs.length - 1; l >= 0; --l) { + if (hasBackEdges && (l != 0)) { + worklist.unshift(n.outputs[l].target); + } else { + worklist.push(n.outputs[l].target); + } + } + } + if (n.rank > maxRank) { + maxRank = n.rank; + } + } + + visited = []; + function dfsFindRankLate(n) { + if (visited[n.id]) return; + visited[n.id] = true; + var originalRank = n.rank; + var newRank = n.rank; + var firstInput = true; + for (var l = 0; l < n.outputs.length; ++l) { + var output = n.outputs[l].target; + dfsFindRankLate(output); + var outputRank = output.rank; + if (output.visible && (firstInput || outputRank <= newRank) && + (outputRank > originalRank)) { + newRank = outputRank - 1; + } + firstInput = false; + } + if (n.opcode != "Start" && n.opcode != "Phi" && n.opcode != "EffectPhi") { + n.rank = newRank; + } + } + + startNodes.forEach(dfsFindRankLate); + + visited = 
[]; + function dfsRankOrder(n) { + if (visited[n.id]) return; + visited[n.id] = true; + for (var l = 0; l < n.outputs.length; ++l) { + var edge = n.outputs[l]; + if (edge.isVisible()) { + var output = edge.target; + dfsRankOrder(output); + } + } + if (n.visitOrderWithinRank == 0) { + n.visitOrderWithinRank = ++visitOrderWithinRank; + } + } + startNodes.forEach(dfsRankOrder); + + endNodes.forEach(function(n) { + n.rank = maxRank + 1; + }); + + var rankSets = []; + // Collect sets for each rank. + graph.nodes.forEach(function(n, i){ + n.y = n.rank * (DEFAULT_NODE_ROW_SEPARATION + graph.getNodeHeight(n) + + 2 * DEFAULT_NODE_BUBBLE_RADIUS); + if (n.visible) { + if (rankSets[n.rank] === undefined) { + rankSets[n.rank] = [n]; + } else { + rankSets[n.rank].push(n); + } + } + }); + + // Iterate backwards from highest to lowest rank, placing nodes so that they + // spread out from the "center" as much as possible while still being + // compact and not overlapping live input lines. + var occupation = newGraphOccupation(graph); + var rankCount = 0; + + rankSets.reverse().forEach(function(rankSet) { + + for (var i = 0; i < rankSet.length; ++i) { + occupation.clearNodeOutputs(rankSet[i]); + } + + if (traceLayout) { + console.log("After clearing outputs"); + occupation.print(); + } + + var placedCount = 0; + rankSet = rankSet.sort(function(a,b) { + return a.visitOrderWithinRank < b.visitOrderWithinRank; + }); + for (var i = 0; i < rankSet.length; ++i) { + var nodeToPlace = rankSet[i]; + if (nodeToPlace.visible) { + nodeToPlace.x = occupation.occupyNode(nodeToPlace); + if (traceLayout) { + console.log("Node " + nodeToPlace.id + " is placed between [" + nodeToPlace.x + ", " + (nodeToPlace.x + nodeToPlace.getTotalNodeWidth()) + ")"); + } + var staggeredFlooredI = Math.floor(placedCount++ % 3); + var delta = MINIMUM_EDGE_SEPARATION * staggeredFlooredI + nodeToPlace.outputApproach += delta; + } else { + nodeToPlace.x = 0; + } + } + + if (traceLayout) { + console.log("Before clearing 
nodes"); + occupation.print(); + } + + occupation.clearOccupiedNodes(); + + if (traceLayout) { + console.log("After clearing nodes"); + occupation.print(); + } + + for (var i = 0; i < rankSet.length; ++i) { + var node = rankSet[i]; + occupation.occupyNodeInputs(node); + } + + if (traceLayout) { + console.log("After occupying inputs"); + occupation.print(); + } + + if (traceLayout) { + console.log("After determining bounding box"); + occupation.print(); + } + }); + + graph.maxBackEdgeNumber = 0; + graph.visibleEdges.each(function (e) { + if (e.isBackEdge()) { + e.backEdgeNumber = ++graph.maxBackEdgeNumber; + } else { + e.backEdgeNumber = 0; + } + }); + + redetermineGraphBoundingBox(graph); + +} + +function redetermineGraphBoundingBox(graph) { + graph.minGraphX = 0; + graph.maxGraphNodeX = 1; + graph.maxGraphX = undefined; // see below + graph.minGraphY = 0; + graph.maxGraphY = 1; + + for (var i = 0; i < graph.nodes.length; ++i) { + var node = graph.nodes[i]; + + if (!node.visible) { + continue; + } + + if (node.x < graph.minGraphX) { + graph.minGraphX = node.x; + } + if ((node.x + node.getTotalNodeWidth()) > graph.maxGraphNodeX) { + graph.maxGraphNodeX = node.x + node.getTotalNodeWidth(); + } + if ((node.y - 50) < graph.minGraphY) { + graph.minGraphY = node.y - 50; + } + if ((node.y + graph.getNodeHeight(node) + 50) > graph.maxGraphY) { + graph.maxGraphY = node.y + graph.getNodeHeight(node) + 50; + } + } + + graph.maxGraphX = graph.maxGraphNodeX + + graph.maxBackEdgeNumber * MINIMUM_EDGE_SEPARATION; + +} diff --git a/deps/v8/tools/turbolizer/graph-view.js b/deps/v8/tools/turbolizer/graph-view.js new file mode 100644 index 00000000000000..8de050f3e682e0 --- /dev/null +++ b/deps/v8/tools/turbolizer/graph-view.js @@ -0,0 +1,1033 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class GraphView extends View { + constructor (d3, id, nodes, edges, broker) { + super(id, broker); + var graph = this; + + var svg = this.divElement.append("svg").attr('version','1.1').attr("width", "100%"); + graph.svg = svg; + + graph.nodes = nodes || []; + graph.edges = edges || []; + + graph.minGraphX = 0; + graph.maxGraphX = 1; + graph.minGraphY = 0; + graph.maxGraphY = 1; + + graph.state = { + selection: null, + mouseDownNode: null, + justDragged: false, + justScaleTransGraph: false, + lastKeyDown: -1, + showTypes: false + }; + + var selectionHandler = { + clear: function() { + broker.clear(selectionHandler); + }, + select: function(items, selected) { + var locations = []; + for (var d of items) { + if (selected) { + d.classList.add("selected"); + } else { + d.classList.remove("selected"); + } + var data = d.__data__; + locations.push({ pos_start: data.pos, pos_end: data.pos + 1, node_id: data.id}); + } + broker.select(selectionHandler, locations, selected); + }, + selectionDifference: function(span1, inclusive1, span2, inclusive2) { + // Should not be called + }, + brokeredSelect: function(locations, selected) { + var test = [].entries().next(); + var selection = graph.nodes + .filter(function(n) { + var pos = n.pos; + for (var location of locations) { + var start = location.pos_start; + var end = location.pos_end; + var id = location.node_id; + if (end != undefined) { + if (pos >= start && pos < end) { + return true; + } + } else if (start != undefined) { + if (pos === start) { + return true; + } + } else { + if (n.id === id) { + return true; + } + } + } + return false; + }); + var newlySelected = new Set(); + selection.forEach(function(n) { + newlySelected.add(n); + if (!n.visible) { + n.visible = true; + } + }); + graph.updateGraphVisibility(); + graph.visibleNodes.each(function(n) { + if (newlySelected.has(n)) { + graph.state.selection.select(this, selected); + } + }); + graph.updateGraphVisibility(); + graph.viewSelection(); + }, + 
brokeredClear: function() { + graph.state.selection.clear(); + } + }; + broker.addSelectionHandler(selectionHandler); + + graph.state.selection = new Selection(selectionHandler); + + var defs = svg.append('svg:defs'); + defs.append('svg:marker') + .attr('id', 'end-arrow') + .attr('viewBox', '0 -4 8 8') + .attr('refX', 2) + .attr('markerWidth', 2.5) + .attr('markerHeight', 2.5) + .attr('orient', 'auto') + .append('svg:path') + .attr('d', 'M0,-4L8,0L0,4'); + + this.graphElement = svg.append("g"); + graph.visibleEdges = this.graphElement.append("g").selectAll("g"); + graph.visibleNodes = this.graphElement.append("g").selectAll("g"); + + graph.drag = d3.behavior.drag() + .origin(function(d){ + return {x: d.x, y: d.y}; + }) + .on("drag", function(args){ + graph.state.justDragged = true; + graph.dragmove.call(graph, args); + }) + + d3.select("#upload").on("click", partial(this.uploadAction, graph)); + d3.select("#layout").on("click", partial(this.layoutAction, graph)); + d3.select("#show-all").on("click", partial(this.showAllAction, graph)); + d3.select("#hide-dead").on("click", partial(this.hideDeadAction, graph)); + d3.select("#hide-unselected").on("click", partial(this.hideUnselectedAction, graph)); + d3.select("#hide-selected").on("click", partial(this.hideSelectedAction, graph)); + d3.select("#zoom-selection").on("click", partial(this.zoomSelectionAction, graph)); + d3.select("#toggle-types").on("click", partial(this.toggleTypesAction, graph)); + d3.select("#search-input").on("keydown", partial(this.searchInputAction, graph)); + + // listen for key events + d3.select(window).on("keydown", function(e){ + graph.svgKeyDown.call(graph); + }) + .on("keyup", function(){ + graph.svgKeyUp.call(graph); + }); + svg.on("mousedown", function(d){graph.svgMouseDown.call(graph, d);}); + svg.on("mouseup", function(d){graph.svgMouseUp.call(graph, d);}); + + graph.dragSvg = d3.behavior.zoom() + .on("zoom", function(){ + if (d3.event.sourceEvent.shiftKey){ + return false; + } else{ + 
graph.zoomed.call(graph); + } + return true; + }) + .on("zoomstart", function(){ + if (!d3.event.sourceEvent.shiftKey) d3.select('body').style("cursor", "move"); + }) + .on("zoomend", function(){ + d3.select('body').style("cursor", "auto"); + }); + + svg.call(graph.dragSvg).on("dblclick.zoom", null); + } + + static get selectedClass() { + return "selected"; + } + static get rectClass() { + return "nodeStyle"; + } + static get activeEditId() { + return "active-editing"; + } + static get nodeRadius() { + return 50; + } + + getNodeHeight(d) { + if (this.state.showTypes) { + return d.normalheight + d.labelbbox.height; + } else { + return d.normalheight; + } + } + + getEdgeFrontier(nodes, inEdges, edgeFilter) { + let frontier = new Set(); + nodes.forEach(function(element) { + var edges = inEdges ? element.__data__.inputs : element.__data__.outputs; + var edgeNumber = 0; + edges.forEach(function(edge) { + if (edgeFilter == undefined || edgeFilter(edge, edgeNumber)) { + frontier.add(edge); + } + ++edgeNumber; + }); + }); + return frontier; + } + + getNodeFrontier(nodes, inEdges, edgeFilter) { + let graph = this; + var frontier = new Set(); + var newState = true; + var edgeFrontier = graph.getEdgeFrontier(nodes, inEdges, edgeFilter); + // Control key toggles edges rather than just turning them on + if (d3.event.ctrlKey) { + edgeFrontier.forEach(function(edge) { + if (edge.visible) { + newState = false; + } + }); + } + edgeFrontier.forEach(function(edge) { + edge.visible = newState; + if (newState) { + var node = inEdges ? 
edge.source : edge.target; + node.visible = true; + frontier.add(node); + } + }); + graph.updateGraphVisibility(); + if (newState) { + return graph.visibleNodes.filter(function(n) { + return frontier.has(n); + }); + } else { + return undefined; + } + } + + dragmove(d) { + var graph = this; + d.x += d3.event.dx; + d.y += d3.event.dy; + graph.updateGraphVisibility(); + } + + initializeContent(data, rememberedSelection) { + this.createGraph(data, rememberedSelection); + if (rememberedSelection != null) { + this.attachSelection(rememberedSelection); + this.connectVisibleSelectedNodes(); + this.viewSelection(); + } + this.updateGraphVisibility(); + } + + deleteContent() { + if (this.visibleNodes) { + this.nodes = []; + this.edges = []; + this.nodeMap = []; + this.updateGraphVisibility(); + } + }; + + measureText(text) { + var textMeasure = document.getElementById('text-measure'); + textMeasure.textContent = text; + return { + width: textMeasure.getBBox().width, + height: textMeasure.getBBox().height, + }; + } + + createGraph(data, initiallyVisibileIds) { + var g = this; + g.nodes = data.nodes; + g.nodeMap = []; + g.nodes.forEach(function(n, i){ + n.__proto__ = Node; + n.visible = false; + n.x = 0; + n.y = 0; + n.rank = MAX_RANK_SENTINEL; + n.inputs = []; + n.outputs = []; + n.rpo = -1; + n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH; + n.cfg = n.control; + g.nodeMap[n.id] = n; + n.displayLabel = n.getDisplayLabel(); + n.labelbbox = g.measureText(n.displayLabel); + n.typebbox = g.measureText(n.getDisplayType()); + var innerwidth = Math.max(n.labelbbox.width, n.typebbox.width); + n.width = Math.alignUp(innerwidth + NODE_INPUT_WIDTH * 2, + NODE_INPUT_WIDTH); + var innerheight = Math.max(n.labelbbox.height, n.typebbox.height); + n.normalheight = innerheight + 20; + }); + g.edges = []; + data.edges.forEach(function(e, i){ + var t = g.nodeMap[e.target]; + var s = g.nodeMap[e.source]; + var newEdge = new Edge(t, e.index, s, e.type); + t.inputs.push(newEdge); + 
s.outputs.push(newEdge); + g.edges.push(newEdge); + if (e.type == 'control') { + s.cfg = true; + } + }); + g.nodes.forEach(function(n, i) { + n.visible = isNodeInitiallyVisible(n); + if (initiallyVisibileIds != undefined) { + if (initiallyVisibileIds.has(n.id)) { + n.visible = true; + } + } + }); + g.fitGraphViewToWindow(); + g.updateGraphVisibility(); + g.layoutGraph(); + g.updateGraphVisibility(); + g.viewWholeGraph(); + } + + connectVisibleSelectedNodes() { + var graph = this; + graph.state.selection.selection.forEach(function(element) { + var edgeNumber = 0; + element.__data__.inputs.forEach(function(edge) { + if (edge.source.visible && edge.target.visible) { + edge.visible = true; + } + }); + element.__data__.outputs.forEach(function(edge) { + if (edge.source.visible && edge.target.visible) { + edge.visible = true; + } + }); + }); + } + + updateInputAndOutputBubbles() { + var g = this; + var s = g.visibleBubbles; + s.classed("filledBubbleStyle", function(c) { + var components = this.id.split(','); + if (components[0] == "ib") { + var edge = g.nodeMap[components[3]].inputs[components[2]]; + return edge.isVisible(); + } else { + return g.nodeMap[components[1]].areAnyOutputsVisible() == 2; + } + }).classed("halfFilledBubbleStyle", function(c) { + var components = this.id.split(','); + if (components[0] == "ib") { + var edge = g.nodeMap[components[3]].inputs[components[2]]; + return false; + } else { + return g.nodeMap[components[1]].areAnyOutputsVisible() == 1; + } + }).classed("bubbleStyle", function(c) { + var components = this.id.split(','); + if (components[0] == "ib") { + var edge = g.nodeMap[components[3]].inputs[components[2]]; + return !edge.isVisible(); + } else { + return g.nodeMap[components[1]].areAnyOutputsVisible() == 0; + } + }); + s.each(function(c) { + var components = this.id.split(','); + if (components[0] == "ob") { + var from = g.nodeMap[components[1]]; + var x = from.getOutputX(); + var y = g.getNodeHeight(from) + DEFAULT_NODE_BUBBLE_RADIUS; 
+ var transform = "translate(" + x + "," + y + ")"; + this.setAttribute('transform', transform); + } + }); + } + + attachSelection(s) { + var graph = this; + if (s.size != 0) { + this.visibleNodes.each(function(n) { + if (s.has(this.__data__.id)) { + graph.state.selection.select(this, true); + } + }); + } + } + + detachSelection() { + var selection = this.state.selection.detachSelection(); + var s = new Set(); + for (var i of selection) { + s.add(i.__data__.id); + }; + return s; + } + + pathMouseDown(path, d) { + d3.event.stopPropagation(); + this.state.selection.clear(); + this.state.selection.add(path); + }; + + nodeMouseDown(node, d) { + d3.event.stopPropagation(); + this.state.mouseDownNode = d; + } + + nodeMouseUp(d3node, d) { + var graph = this, + state = graph.state, + consts = graph.consts; + + var mouseDownNode = state.mouseDownNode; + + if (!mouseDownNode) return; + + if (state.justDragged) { + // dragged, not clicked + redetermineGraphBoundingBox(graph); + state.justDragged = false; + } else{ + // clicked, not dragged + var extend = d3.event.shiftKey; + var selection = graph.state.selection; + if (!extend) { + selection.clear(); + } + selection.select(d3node[0][0], true); + } + } + + selectSourcePositions(start, end, selected) { + var graph = this; + var map = []; + var sel = graph.nodes.filter(function(n) { + var pos = (n.pos === undefined) + ? 
-1 + : n.getFunctionRelativeSourcePosition(graph); + if (pos >= start && pos < end) { + map[n.id] = true; + n.visible = true; + } + }); + graph.updateGraphVisibility(); + graph.visibleNodes.filter(function(n) { return map[n.id]; }) + .each(function(n) { + var selection = graph.state.selection; + selection.select(d3.select(this), selected); + }); + } + + selectAllNodes(inEdges, filter) { + var graph = this; + if (!d3.event.shiftKey) { + graph.state.selection.clear(); + } + graph.state.selection.select(graph.visibleNodes[0], true); + graph.updateGraphVisibility(); + } + + uploadAction(graph) { + document.getElementById("hidden-file-upload").click(); + } + + layoutAction(graph) { + graph.updateGraphVisibility(); + graph.layoutGraph(); + graph.updateGraphVisibility(); + graph.viewWholeGraph(); + } + + showAllAction(graph) { + graph.nodes.filter(function(n) { n.visible = true; }) + graph.edges.filter(function(e) { e.visible = true; }) + graph.updateGraphVisibility(); + graph.viewWholeGraph(); + } + + hideDeadAction(graph) { + graph.nodes.filter(function(n) { if (!n.isLive()) n.visible = false; }) + graph.updateGraphVisibility(); + } + + hideUnselectedAction(graph) { + var unselected = graph.visibleNodes.filter(function(n) { + return !this.classList.contains("selected"); + }); + unselected.each(function(n) { + n.visible = false; + }); + graph.updateGraphVisibility(); + } + + hideSelectedAction(graph) { + var selected = graph.visibleNodes.filter(function(n) { + return this.classList.contains("selected"); + }); + selected.each(function(n) { + n.visible = false; + }); + graph.state.selection.clear(); + graph.updateGraphVisibility(); + } + + zoomSelectionAction(graph) { + graph.viewSelection(); + } + + toggleTypesAction(graph) { + graph.toggleTypes(); + } + + searchInputAction(graph) { + if (d3.event.keyCode == 13) { + graph.state.selection.clear(); + var query = this.value; + window.sessionStorage.setItem("lastSearch", query); + + var reg = new RegExp(query); + var 
filterFunction = function(n) { + return (reg.exec(n.getDisplayLabel()) != null || + (graph.state.showTypes && reg.exec(n.getDisplayType())) || + reg.exec(n.opcode) != null); + }; + if (d3.event.ctrlKey) { + graph.nodes.forEach(function(n, i) { + if (filterFunction(n)) { + n.visible = true; + } + }); + graph.updateGraphVisibility(); + } + var selected = graph.visibleNodes.each(function(n) { + if (filterFunction(n)) { + graph.state.selection.select(this, true); + } + }); + graph.connectVisibleSelectedNodes(); + graph.updateGraphVisibility(); + this.blur(); + graph.viewSelection(); + } + d3.event.stopPropagation(); + } + + svgMouseDown() { + this.state.graphMouseDown = true; + } + + svgMouseUp() { + var graph = this, + state = graph.state; + if (state.justScaleTransGraph) { + // Dragged + state.justScaleTransGraph = false; + } else { + // Clicked + if (state.mouseDownNode == null) { + graph.state.selection.clear(); + } + } + state.mouseDownNode = null; + state.graphMouseDown = false; + } + + svgKeyDown() { + var state = this.state; + var graph = this; + + // Don't handle key press repetition + if(state.lastKeyDown !== -1) return; + + var showSelectionFrontierNodes = function(inEdges, filter, select) { + var frontier = graph.getNodeFrontier(state.selection.selection, inEdges, filter); + if (frontier != undefined) { + if (select) { + if (!d3.event.shiftKey) { + state.selection.clear(); + } + state.selection.select(frontier[0], true); + } + graph.updateGraphVisibility(); + } + allowRepetition = false; + } + + var allowRepetition = true; + var eventHandled = true; // unless the below switch defaults + switch(d3.event.keyCode) { + case 49: + case 50: + case 51: + case 52: + case 53: + case 54: + case 55: + case 56: + case 57: + // '1'-'9' + showSelectionFrontierNodes(true, + (edge, index) => { return index == (d3.event.keyCode - 49); }, + false); + break; + case 97: + case 98: + case 99: + case 100: + case 101: + case 102: + case 103: + case 104: + case 105: + // 'numpad 
1'-'numpad 9' + showSelectionFrontierNodes(true, + (edge, index) => { return index == (d3.event.keyCode - 97); }, + false); + break; + case 67: + // 'c' + showSelectionFrontierNodes(true, + (edge, index) => { return edge.type == 'control'; }, + false); + break; + case 69: + // 'e' + showSelectionFrontierNodes(true, + (edge, index) => { return edge.type == 'effect'; }, + false); + break; + case 79: + // 'o' + showSelectionFrontierNodes(false, undefined, false); + break; + case 73: + // 'i' + showSelectionFrontierNodes(true, undefined, false); + break; + case 65: + // 'a' + graph.selectAllNodes(); + allowRepetition = false; + break; + case 38: + case 40: { + showSelectionFrontierNodes(d3.event.keyCode == 38, undefined, true); + break; + } + case 82: + // 'r' + if (!d3.event.ctrlKey) { + this.layoutAction(this); + } else { + eventHandled = false; + } + break; + case 191: + // '/' + document.getElementById("search-input").focus(); + document.getElementById("search-input").select(); + break; + default: + eventHandled = false; + break; + } + if (eventHandled) { + d3.event.preventDefault(); + } + if (!allowRepetition) { + state.lastKeyDown = d3.event.keyCode; + } + } + + svgKeyUp() { + this.state.lastKeyDown = -1 + }; + + layoutEdges() { + var graph = this; + graph.maxGraphX = graph.maxGraphNodeX; + this.visibleEdges.attr("d", function(edge){ + return edge.generatePath(graph); + }); + } + + layoutGraph() { + layoutNodeGraph(this); + } + + // call to propagate changes to graph + updateGraphVisibility() { + + var graph = this, + state = graph.state; + + var filteredEdges = graph.edges.filter(function(e) { return e.isVisible(); }); + var visibleEdges = graph.visibleEdges.data(filteredEdges, function(edge) { + return edge.stringID(); + }); + + // add new paths + visibleEdges.enter() + .append('path') + .style('marker-end','url(#end-arrow)') + .classed('hidden', function(e) { + return !e.isVisible(); + }) + .attr("id", function(edge){ return "e," + edge.stringID(); }) + 
.on("mousedown", function(d){ + graph.pathMouseDown.call(graph, d3.select(this), d); + }) + + // Set the correct styles on all of the paths + visibleEdges.classed('value', function(e) { + return e.type == 'value' || e.type == 'context'; + }).classed('control', function(e) { + return e.type == 'control'; + }).classed('effect', function(e) { + return e.type == 'effect'; + }).classed('frame-state', function(e) { + return e.type == 'frame-state'; + }).attr('stroke-dasharray', function(e) { + if (e.type == 'frame-state') return "10,10"; + return (e.type == 'effect') ? "5,5" : ""; + }); + + // remove old links + visibleEdges.exit().remove(); + + graph.visibleEdges = visibleEdges; + + // update existing nodes + var filteredNodes = graph.nodes.filter(function(n) { return n.visible; }); + graph.visibleNodes = graph.visibleNodes.data(filteredNodes, function(d) { + return d.id; + }); + graph.visibleNodes.attr("transform", function(n){ + return "translate(" + n.x + "," + n.y + ")"; + }).select('rect'). 
+ attr(HEIGHT, function(d) { return graph.getNodeHeight(d); }); + + // add new nodes + var newGs = graph.visibleNodes.enter() + .append("g"); + + newGs.classed("control", function(n) { return n.isControl(); }) + .classed("live", function(n) { return n.isLive(); }) + .classed("dead", function(n) { return !n.isLive(); }) + .classed("javascript", function(n) { return n.isJavaScript(); }) + .classed("input", function(n) { return n.isInput(); }) + .classed("simplified", function(n) { return n.isSimplified(); }) + .classed("machine", function(n) { return n.isMachine(); }) + .attr("transform", function(d){ return "translate(" + d.x + "," + d.y + ")";}) + .on("mousedown", function(d){ + graph.nodeMouseDown.call(graph, d3.select(this), d); + }) + .on("mouseup", function(d){ + graph.nodeMouseUp.call(graph, d3.select(this), d); + }) + .call(graph.drag); + + newGs.append("rect") + .attr("rx", 10) + .attr("ry", 10) + .attr(WIDTH, function(d) { + return d.getTotalNodeWidth(); + }) + .attr(HEIGHT, function(d) { + return graph.getNodeHeight(d); + }) + + function appendInputAndOutputBubbles(g, d) { + for (var i = 0; i < d.inputs.length; ++i) { + var x = d.getInputX(i); + var y = -DEFAULT_NODE_BUBBLE_RADIUS; + var s = g.append('circle') + .classed("filledBubbleStyle", function(c) { + return d.inputs[i].isVisible(); + } ) + .classed("bubbleStyle", function(c) { + return !d.inputs[i].isVisible(); + } ) + .attr("id", "ib," + d.inputs[i].stringID()) + .attr("r", DEFAULT_NODE_BUBBLE_RADIUS) + .attr("transform", function(d) { + return "translate(" + x + "," + y + ")"; + }) + .on("mousedown", function(d){ + var components = this.id.split(','); + var node = graph.nodeMap[components[3]]; + var edge = node.inputs[components[2]]; + var visible = !edge.isVisible(); + node.setInputVisibility(components[2], visible); + d3.event.stopPropagation(); + graph.updateGraphVisibility(); + }); + } + if (d.outputs.length != 0) { + var x = d.getOutputX(); + var y = graph.getNodeHeight(d) + 
DEFAULT_NODE_BUBBLE_RADIUS; + var s = g.append('circle') + .classed("filledBubbleStyle", function(c) { + return d.areAnyOutputsVisible() == 2; + } ) + .classed("halFilledBubbleStyle", function(c) { + return d.areAnyOutputsVisible() == 1; + } ) + .classed("bubbleStyle", function(c) { + return d.areAnyOutputsVisible() == 0; + } ) + .attr("id", "ob," + d.id) + .attr("r", DEFAULT_NODE_BUBBLE_RADIUS) + .attr("transform", function(d) { + return "translate(" + x + "," + y + ")"; + }) + .on("mousedown", function(d) { + d.setOutputVisibility(d.areAnyOutputsVisible() == 0); + d3.event.stopPropagation(); + graph.updateGraphVisibility(); + }); + } + } + + newGs.each(function(d){ + appendInputAndOutputBubbles(d3.select(this), d); + }); + + newGs.each(function(d){ + d3.select(this).append("text") + .classed("label", true) + .attr("text-anchor","right") + .attr("dx", 5) + .attr("dy", 5) + .append('tspan') + .text(function(l) { + return d.getDisplayLabel(); + }) + .append("title") + .text(function(l) { + return d.getTitle(); + }) + if (d.type != undefined) { + d3.select(this).append("text") + .classed("label", true) + .classed("type", true) + .attr("text-anchor","right") + .attr("dx", 5) + .attr("dy", d.labelbbox.height + 5) + .append('tspan') + .text(function(l) { + return d.getDisplayType(); + }) + .append("title") + .text(function(l) { + return d.getType(); + }) + } + }); + + graph.visibleNodes.select('.type').each(function (d) { + this.setAttribute('visibility', graph.state.showTypes ? 
'visible' : 'hidden'); + }); + + // remove old nodes + graph.visibleNodes.exit().remove(); + + graph.visibleBubbles = d3.selectAll('circle'); + + graph.updateInputAndOutputBubbles(); + + graph.layoutEdges(); + + graph.svg.style.height = '100%'; + } + + getVisibleTranslation(translate, scale) { + var graph = this; + var height = (graph.maxGraphY - graph.minGraphY + 2 * GRAPH_MARGIN) * scale; + var width = (graph.maxGraphX - graph.minGraphX + 2 * GRAPH_MARGIN) * scale; + + var dimensions = this.getSvgViewDimensions(); + + var baseY = translate[1]; + var minY = (graph.minGraphY - GRAPH_MARGIN) * scale; + var maxY = (graph.maxGraphY + GRAPH_MARGIN) * scale; + + var adjustY = 0; + var adjustYCandidate = 0; + if ((maxY + baseY) < dimensions[1]) { + adjustYCandidate = dimensions[1] - (maxY + baseY); + if ((minY + baseY + adjustYCandidate) > 0) { + adjustY = (dimensions[1] / 2) - (maxY - (height / 2)) - baseY; + } else { + adjustY = adjustYCandidate; + } + } else if (-baseY < minY) { + adjustYCandidate = -(baseY + minY); + if ((maxY + baseY + adjustYCandidate) < dimensions[1]) { + adjustY = (dimensions[1] / 2) - (maxY - (height / 2)) - baseY; + } else { + adjustY = adjustYCandidate; + } + } + translate[1] += adjustY; + + var baseX = translate[0]; + var minX = (graph.minGraphX - GRAPH_MARGIN) * scale; + var maxX = (graph.maxGraphX + GRAPH_MARGIN) * scale; + + var adjustX = 0; + var adjustXCandidate = 0; + if ((maxX + baseX) < dimensions[0]) { + adjustXCandidate = dimensions[0] - (maxX + baseX); + if ((minX + baseX + adjustXCandidate) > 0) { + adjustX = (dimensions[0] / 2) - (maxX - (width / 2)) - baseX; + } else { + adjustX = adjustXCandidate; + } + } else if (-baseX < minX) { + adjustXCandidate = -(baseX + minX); + if ((maxX + baseX + adjustXCandidate) < dimensions[0]) { + adjustX = (dimensions[0] / 2) - (maxX - (width / 2)) - baseX; + } else { + adjustX = adjustXCandidate; + } + } + translate[0] += adjustX; + return translate; + } + + translateClipped(translate, scale, 
transition) { + var graph = this; + var graphNode = this.graphElement[0][0]; + var translate = this.getVisibleTranslation(translate, scale); + if (transition) { + graphNode.classList.add('visible-transition'); + clearTimeout(graph.transitionTimout); + graph.transitionTimout = setTimeout(function(){ + graphNode.classList.remove('visible-transition'); + }, 1000); + } + var translateString = "translate(" + translate[0] + "px," + translate[1] + "px) scale(" + scale + ")"; + graphNode.style.transform = translateString; + graph.dragSvg.translate(translate); + graph.dragSvg.scale(scale); + } + + zoomed(){ + this.state.justScaleTransGraph = true; + var scale = this.dragSvg.scale(); + this.translateClipped(d3.event.translate, scale); + } + + + getSvgViewDimensions() { + var canvasWidth = this.parentNode.clientWidth; + var documentElement = document.documentElement; + var canvasHeight = documentElement.clientHeight; + return [canvasWidth, canvasHeight]; + } + + + minScale() { + var graph = this; + var dimensions = this.getSvgViewDimensions(); + var width = graph.maxGraphX - graph.minGraphX; + var height = graph.maxGraphY - graph.minGraphY; + var minScale = dimensions[0] / (width + GRAPH_MARGIN * 2); + var minScaleYCandidate = dimensions[1] / (height + GRAPH_MARGIN * 2); + if (minScaleYCandidate < minScale) { + minScale = minScaleYCandidate; + } + this.dragSvg.scaleExtent([minScale, 1.5]); + return minScale; + } + + fitGraphViewToWindow() { + this.svg.attr("height", document.documentElement.clientHeight + "px"); + this.translateClipped(this.dragSvg.translate(), this.dragSvg.scale()); + } + + toggleTypes() { + var graph = this; + graph.state.showTypes = !graph.state.showTypes; + var element = document.getElementById('toggle-types'); + if (graph.state.showTypes) { + element.classList.add('button-input-toggled'); + } else { + element.classList.remove('button-input-toggled'); + } + graph.updateGraphVisibility(); + } + + viewSelection() { + var graph = this; + var minX, maxX, 
minY, maxY; + var hasSelection = false; + graph.visibleNodes.each(function(n) { + if (this.classList.contains("selected")) { + hasSelection = true; + minX = minX ? Math.min(minX, n.x) : n.x; + maxX = maxX ? Math.max(maxX, n.x + n.getTotalNodeWidth()) : + n.x + n.getTotalNodeWidth(); + minY = minY ? Math.min(minY, n.y) : n.y; + maxY = maxY ? Math.max(maxY, n.y + graph.getNodeHeight(n)) : + n.y + graph.getNodeHeight(n); + } + }); + if (hasSelection) { + graph.viewGraphRegion(minX - NODE_INPUT_WIDTH, minY - 60, + maxX + NODE_INPUT_WIDTH, maxY + 60, + true); + } + } + + viewGraphRegion(minX, minY, maxX, maxY, transition) { + var graph = this; + var dimensions = this.getSvgViewDimensions(); + var width = maxX - minX; + var height = maxY - minY; + var scale = Math.min(dimensions[0] / width, dimensions[1] / height); + scale = Math.min(1.5, scale); + scale = Math.max(graph.minScale(), scale); + var translation = [-minX*scale, -minY*scale]; + translation = graph.getVisibleTranslation(translation, scale); + graph.translateClipped(translation, scale, transition); + } + + viewWholeGraph() { + var graph = this; + var minScale = graph.minScale(); + var translation = [0, 0]; + translation = graph.getVisibleTranslation(translation, minScale); + graph.translateClipped(translation, minScale); + } +} diff --git a/deps/v8/tools/turbolizer/hide-selected.png b/deps/v8/tools/turbolizer/hide-selected.png new file mode 100644 index 00000000000000..207cdbb89aa4a2 Binary files /dev/null and b/deps/v8/tools/turbolizer/hide-selected.png differ diff --git a/deps/v8/tools/turbolizer/hide-unselected.png b/deps/v8/tools/turbolizer/hide-unselected.png new file mode 100644 index 00000000000000..15617b0939b031 Binary files /dev/null and b/deps/v8/tools/turbolizer/hide-unselected.png differ diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html new file mode 100644 index 00000000000000..4066fd8010c362 --- /dev/null +++ b/deps/v8/tools/turbolizer/index.html @@ -0,0 +1,97 
@@ + + + + Turbolizer + + + +
+
+
+      
+
+
+
+ + + + + + + + + + + +
+ +
+ + +
+
+
+
+
+          
    +
+
+
+
+ +
+
+ +
+ + +
+
+ + +
+ + + + + + + + + + + + + + + + + + + + + + diff --git a/deps/v8/tools/turbolizer/lang-disassembly.js b/deps/v8/tools/turbolizer/lang-disassembly.js new file mode 100644 index 00000000000000..590f9fd804f6a8 --- /dev/null +++ b/deps/v8/tools/turbolizer/lang-disassembly.js @@ -0,0 +1,14 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +PR.registerLangHandler( + PR.createSimpleLexer( + [ + [PR.PR_STRING, /^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$))/, null, '\''], + [PR.PR_PLAIN, /^\s+/, null, ' \r\n\t\xA0'] + ], + [ // fallthroughStylePatterns + [PR.PR_COMMENT, /;; debug: position \d+/, null], + ]), + ['disassembly']); diff --git a/deps/v8/tools/turbolizer/layout-icon.png b/deps/v8/tools/turbolizer/layout-icon.png new file mode 100644 index 00000000000000..95a517afa64be5 Binary files /dev/null and b/deps/v8/tools/turbolizer/layout-icon.png differ diff --git a/deps/v8/tools/turbolizer/left-arrow.png b/deps/v8/tools/turbolizer/left-arrow.png new file mode 100644 index 00000000000000..fc0603e8c3ac84 Binary files /dev/null and b/deps/v8/tools/turbolizer/left-arrow.png differ diff --git a/deps/v8/tools/turbolizer/live.png b/deps/v8/tools/turbolizer/live.png new file mode 100644 index 00000000000000..ac72bb93e82922 Binary files /dev/null and b/deps/v8/tools/turbolizer/live.png differ diff --git a/deps/v8/tools/turbolizer/monkey.js b/deps/v8/tools/turbolizer/monkey.js new file mode 100644 index 00000000000000..129f8b32684adf --- /dev/null +++ b/deps/v8/tools/turbolizer/monkey.js @@ -0,0 +1,26 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +Array.prototype.getStaggeredFromMiddle = function(i) { + if (i >= this.length) { + throw("getStaggeredFromMiddle: OOB"); + } + var middle = Math.floor(this.length / 2); + var index = middle + (((i % 2) == 0) ? (i / 2) : (((1 - i) / 2) - 1)); + return this[index]; +} + +Array.prototype.contains = function(obj) { + var i = this.length; + while (i--) { + if (this[i] === obj) { + return true; + } + } + return false; +} + +Math.alignUp = function(raw, multiple) { + return Math.floor((raw + multiple - 1) / multiple) * multiple; +} diff --git a/deps/v8/tools/turbolizer/node.js b/deps/v8/tools/turbolizer/node.js new file mode 100644 index 00000000000000..3656e5d7e5a547 --- /dev/null +++ b/deps/v8/tools/turbolizer/node.js @@ -0,0 +1,147 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +var TYPE_HEIGHT = 25; +var DEFAULT_NODE_BUBBLE_RADIUS = 12; +var NODE_INPUT_WIDTH = 50; +var MINIMUM_NODE_INPUT_APPROACH = 15 + 2 * DEFAULT_NODE_BUBBLE_RADIUS; +var MINIMUM_NODE_OUTPUT_APPROACH = 15; + +function isNodeInitiallyVisible(node) { + return node.cfg; +} + +var Node = { + isControl: function() { + return this.control; + }, + isInput: function() { + return this.opcode == 'Parameter' || this.opcode.endsWith('Constant'); + }, + isLive: function() { + return this.live !== false; + }, + isJavaScript: function() { + return this.opcode.startsWith('JS'); + }, + isSimplified: function() { + if (this.isJavaScript) return false; + return this.opcode.endsWith('Phi') || + this.opcode.startsWith('Boolean') || + this.opcode.startsWith('Number') || + this.opcode.startsWith('String') || + this.opcode.startsWith('Change') || + this.opcode.startsWith('Object') || + this.opcode.startsWith('Reference') || + this.opcode.startsWith('Any') || + this.opcode.endsWith('ToNumber') || + (this.opcode == 'AnyToBoolean') || + (this.opcode.startsWith('Load') && this.opcode.length > 4) || 
+ (this.opcode.startsWith('Store') && this.opcode.length > 5); + }, + isMachine: function() { + return !(this.isControl() || this.isInput() || + this.isJavaScript() || this.isSimplified()); + }, + getTotalNodeWidth: function() { + var inputWidth = this.inputs.length * NODE_INPUT_WIDTH; + return Math.max(inputWidth, this.width); + }, + getTitle: function() { + var propsString; + if (this.properties === undefined) { + propsString = ""; + } else if (this.properties === "") { + propsString = "no properties"; + } else { + propsString = "[" + this.properties + "]"; + } + return this.title + "\n" + propsString + "\n" + this.opinfo; + }, + getDisplayLabel: function() { + var result = this.id + ":" + this.label; + if (result.length > 40) { + return this.id + ":" + this.opcode; + } else { + return result; + } + }, + getType: function() { + return this.type; + }, + getDisplayType: function() { + var type_string = this.type; + if (type_string == undefined) return ""; + if (type_string.length > 24) { + type_string = type_string.substr(0, 25) + "..."; + } + return type_string; + }, + deepestInputRank: function() { + var deepestRank = 0; + this.inputs.forEach(function(e) { + if (e.isVisible() && !e.isBackEdge()) { + if (e.source.rank > deepestRank) { + deepestRank = e.source.rank; + } + } + }); + return deepestRank; + }, + areAnyOutputsVisible: function() { + var visibleCount = 0; + this.outputs.forEach(function(e) { if (e.isVisible()) ++visibleCount; }); + if (this.outputs.length == visibleCount) return 2; + if (visibleCount != 0) return 1; + return 0; + }, + setOutputVisibility: function(v) { + var result = false; + this.outputs.forEach(function(e) { + e.visible = v; + if (v) { + if (!e.target.visible) { + e.target.visible = true; + result = true; + } + } + }); + return result; + }, + setInputVisibility: function(i, v) { + var edge = this.inputs[i]; + edge.visible = v; + if (v) { + if (!edge.source.visible) { + edge.source.visible = true; + return true; + } + } + return false; 
+ }, + getInputApproach: function(index) { + return this.y - MINIMUM_NODE_INPUT_APPROACH - + (index % 4) * MINIMUM_EDGE_SEPARATION - DEFAULT_NODE_BUBBLE_RADIUS + }, + getOutputApproach: function(graph, index) { + return this.y + this.outputApproach + graph.getNodeHeight(this) + + + DEFAULT_NODE_BUBBLE_RADIUS; + }, + getInputX: function(index) { + var result = this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2) + + (index - this.inputs.length + 1) * NODE_INPUT_WIDTH; + return result; + }, + getOutputX: function() { + return this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2); + }, + getFunctionRelativeSourcePosition: function(graph) { + return this.pos - graph.sourcePosition; + }, + hasBackEdges: function() { + return (this.opcode == "Loop") || + ((this.opcode == "Phi" || this.opcode == "EffectPhi") && + this.inputs[this.inputs.length - 1].source.opcode == "Loop"); + } +}; diff --git a/deps/v8/tools/turbolizer/right-arrow.png b/deps/v8/tools/turbolizer/right-arrow.png new file mode 100644 index 00000000000000..ef3964346f6e4b Binary files /dev/null and b/deps/v8/tools/turbolizer/right-arrow.png differ diff --git a/deps/v8/tools/turbolizer/schedule-view.js b/deps/v8/tools/turbolizer/schedule-view.js new file mode 100644 index 00000000000000..2cd49c991fdd5d --- /dev/null +++ b/deps/v8/tools/turbolizer/schedule-view.js @@ -0,0 +1,128 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class ScheduleView extends TextView { + constructor(id, broker) { + super(id, broker, null, false); + let view = this; + let BLOCK_STYLE = { + css: 'tag' + }; + const BLOCK_HEADER_STYLE = { + css: 'com', + block_id: -1, + location: function(text) { + let matches = /\d+/.exec(text); + if (!matches) return undefined; + BLOCK_HEADER_STYLE.block_id = Number(matches[0]); + return { + block_id: BLOCK_HEADER_STYLE.block_id + }; + }, + }; + const BLOCK_LINK_STYLE = { + css: 'tag', + link: function(text) { + let id = Number(text.substr(1)); + view.select(function(location) { return location.block_id == id; }, true, true); + } + }; + const ID_STYLE = { + css: 'tag', + location: function(text) { + let matches = /\d+/.exec(text); + return { + node_id: Number(matches[0]), + block_id: BLOCK_HEADER_STYLE.block_id + }; + }, + }; + const ID_LINK_STYLE = { + css: 'tag', + link: function(text) { + let id = Number(text); + view.select(function(location) { return location.node_id == id; }, true, true); + } + }; + const NODE_STYLE = { css: 'kwd' }; + const GOTO_STYLE = { css: 'kwd', + goto_id: -2, + location: function(text) { + return { + node_id: GOTO_STYLE.goto_id--, + block_id: BLOCK_HEADER_STYLE.block_id + }; + } + } + const ARROW_STYLE = { css: 'kwd' }; + let patterns = [ + [ + [/^--- BLOCK B\d+/, BLOCK_HEADER_STYLE, 1], + [/^\s+\d+: /, ID_STYLE, 2], + [/^\s+Goto/, GOTO_STYLE, 6], + [/^.*/, null, -1] + ], + [ + [/^ +/, null], + [/^\(deferred\)/, BLOCK_HEADER_STYLE], + [/^B\d+/, BLOCK_LINK_STYLE], + [/^<-/, ARROW_STYLE], + [/^->/, ARROW_STYLE], + [/^,/, null], + [/^---/, BLOCK_HEADER_STYLE, -1] + ], + // Parse opcode including [] + [ + [/^[A-Za-z0-9_]+(\[.*\])?$/, NODE_STYLE, -1], + [/^[A-Za-z0-9_]+(\[.*\])?/, NODE_STYLE, 3] + ], + // Parse optional parameters + [ + [/^ /, null, 4], + [/^\(/, null], + [/^\d+/, ID_LINK_STYLE], + [/^, /, null], + [/^\)$/, null, -1], + [/^\)/, null, 4], + ], + [ + [/^ -> /, ARROW_STYLE, 5], + [/^.*/, null, -1] + ], + [ + [/^B\d+$/, 
BLOCK_LINK_STYLE, -1], + [/^B\d+/, BLOCK_LINK_STYLE], + [/^, /, null] + ], + [ + [/^ -> /, ARROW_STYLE], + [/^B\d+$/, BLOCK_LINK_STYLE, -1] + ] + ]; + this.setPatterns(patterns); + } + + initializeContent(data, rememberedSelection) { + super.initializeContent(data, rememberedSelection); + var graph = this; + var locations = []; + for (var id of rememberedSelection) { + locations.push({ node_id : id }); + } + this.selectLocations(locations, true, true); + } + + detachSelection() { + var selection = this.selection.detachSelection(); + var s = new Set(); + for (var i of selection) { + if (i.location.node_id != undefined && i.location.node_id > 0) { + s.add(i.location.node_id); + } + }; + return s; + } +} diff --git a/deps/v8/tools/turbolizer/search.png b/deps/v8/tools/turbolizer/search.png new file mode 100644 index 00000000000000..12dc3e34691d58 Binary files /dev/null and b/deps/v8/tools/turbolizer/search.png differ diff --git a/deps/v8/tools/turbolizer/search2.png b/deps/v8/tools/turbolizer/search2.png new file mode 100644 index 00000000000000..88dd193809d05d Binary files /dev/null and b/deps/v8/tools/turbolizer/search2.png differ diff --git a/deps/v8/tools/turbolizer/selection-broker.js b/deps/v8/tools/turbolizer/selection-broker.js new file mode 100644 index 00000000000000..822cf1ce1f62fd --- /dev/null +++ b/deps/v8/tools/turbolizer/selection-broker.js @@ -0,0 +1,99 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +var SelectionBroker = function() { + this.brokers = []; + this.dispatching = false; + this.lastDispatchingHandler = null; + this.nodePositionMap = []; + this.sortedPositionList = []; + this.positionNodeMap = []; +}; + +SelectionBroker.prototype.addSelectionHandler = function(handler) { + this.brokers.push(handler); +} + +SelectionBroker.prototype.setNodePositionMap = function(map) { + let broker = this; + if (!map) return; + broker.nodePositionMap = map; + broker.positionNodeMap = []; + broker.sortedPositionList = []; + let next = 0; + for (let i in broker.nodePositionMap) { + broker.sortedPositionList[next] = Number(broker.nodePositionMap[i]); + broker.positionNodeMap[next++] = i; + } + broker.sortedPositionList = sortUnique(broker.sortedPositionList, + function(a,b) { return a - b; }); + this.positionNodeMap.sort(function(a,b) { + let result = broker.nodePositionMap[a] - broker.nodePositionMap[b]; + if (result != 0) return result; + return a - b; + }); +} + +SelectionBroker.prototype.select = function(from, locations, selected) { + let broker = this; + if (!broker.dispatching) { + broker.lastDispatchingHandler = from; + try { + broker.dispatching = true; + let enrichLocations = function(locations) { + result = []; + for (let location of locations) { + let newLocation = {}; + if (location.pos_start != undefined) { + newLocation.pos_start = location.pos_start; + } + if (location.pos_end != undefined) { + newLocation.pos_end = location.pos_end; + } + if (location.node_id != undefined) { + newLocation.node_id = location.node_id; + } + if (location.block_id != undefined) { + newLocation.block_id = location.block_id; + } + if (newLocation.pos_start == undefined && + newLocation.pos_end == undefined && + newLocation.node_id != undefined) { + if (broker.nodePositionMap && broker.nodePositionMap[location.node_id]) { + newLocation.pos_start = broker.nodePositionMap[location.node_id]; + newLocation.pos_end = location.pos_start + 1; + } + } + result.push(newLocation); + } 
+ return result; + } + locations = enrichLocations(locations); + for (var b of this.brokers) { + if (b != from) { + b.brokeredSelect(locations, selected); + } + } + } + finally { + broker.dispatching = false; + } + } +} + +SelectionBroker.prototype.clear = function(from) { + this.lastDispatchingHandler = null; + if (!this.dispatching) { + try { + this.dispatching = true; + this.brokers.forEach(function(b) { + if (b != from) { + b.brokeredClear(); + } + }); + } finally { + this.dispatching = false; + } + } +} diff --git a/deps/v8/tools/turbolizer/selection.js b/deps/v8/tools/turbolizer/selection.js new file mode 100644 index 00000000000000..26f1bde1972606 --- /dev/null +++ b/deps/v8/tools/turbolizer/selection.js @@ -0,0 +1,108 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +var Selection = function(handler) { + this.handler = handler; + this.selectionBase = null; + this.lastSelection = null; + this.selection = new Set(); +} + + +Selection.prototype.isEmpty = function() { + return this.selection.size == 0; +} + + +Selection.prototype.clear = function() { + var handler = this.handler; + this.selectionBase = null; + this.lastSelection = null; + handler.select(this.selection, false); + handler.clear(); + this.selection = new Set(); +} + + +count = 0; + +Selection.prototype.select = function(s, isSelected) { + var handler = this.handler; + if (!(Symbol.iterator in Object(s))) { s = [s]; } + if (isSelected) { + let first = true; + for (let i of s) { + if (first) { + this.selectionBase = i; + this.lastSelection = i; + first = false; + } + this.selection.add(i); + } + handler.select(this.selection, true); + } else { + let unselectSet = new Set(); + for (let i of s) { + if (this.selection.has(i)) { + unselectSet.add(i); + this.selection.delete(i); + } + } + handler.select(unselectSet, false); + } +} + + +Selection.prototype.extendTo = function(pos) 
{ + if (pos == this.lastSelection || this.lastSelection === null) return; + + var handler = this.handler; + var pos_diff = handler.selectionDifference(pos, true, this.lastSelection, false); + var unselect_diff = []; + if (pos_diff.length == 0) { + pos_diff = handler.selectionDifference(this.selectionBase, false, pos, true); + if (pos_diff.length != 0) { + unselect_diff = handler.selectionDifference(this.lastSelection, true, this.selectionBase, false); + this.selection = new Set(); + this.selection.add(this.selectionBase); + for (var d of pos_diff) { + this.selection.add(d); + } + } else { + unselect_diff = handler.selectionDifference(this.lastSelection, true, pos, false); + for (var d of unselect_diff) { + this.selection.delete(d); + } + } + } else { + unselect_diff = handler.selectionDifference(this.selectionBase, false, this.lastSelection, true); + if (unselect_diff != 0) { + pos_diff = handler.selectionDifference(pos, true, this.selectionBase, false); + if (pos_diff.length == 0) { + unselect_diff = handler.selectionDifference(pos, false, this.lastSelection, true); + } + for (var d of unselect_diff) { + this.selection.delete(d); + } + } + if (pos_diff.length != 0) { + for (var d of pos_diff) { + this.selection.add(d); + } + } + } + handler.select(unselect_diff, false); + handler.select(pos_diff, true); + this.lastSelection = pos; +} + + +Selection.prototype.detachSelection = function() { + var result = new Set(); + for (var i of this.selection) { + result.add(i); + } + this.clear(); + return result; +} diff --git a/deps/v8/tools/turbolizer/text-view.js b/deps/v8/tools/turbolizer/text-view.js new file mode 100644 index 00000000000000..6822500dde6daf --- /dev/null +++ b/deps/v8/tools/turbolizer/text-view.js @@ -0,0 +1,296 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class TextView extends View { + constructor(id, broker, patterns, allowSpanSelection) { + super(id, broker); + let view = this; + view.hide(); + view.textListNode = view.divNode.getElementsByTagName('ul')[0]; + view.fillerSvgElement = view.divElement.append("svg").attr('version','1.1').attr("width", "0"); + view.patterns = patterns; + view.allowSpanSelection = allowSpanSelection; + view.nodeToLineMap = []; + var selectionHandler = { + clear: function() { + broker.clear(selectionHandler); + }, + select: function(items, selected) { + for (let i of items) { + if (selected) { + i.classList.add("selected"); + } else { + i.classList.remove("selected"); + } + } + broker.clear(selectionHandler); + broker.select(selectionHandler, view.getLocations(items), selected); + }, + selectionDifference: function(span1, inclusive1, span2, inclusive2) { + return null; + }, + brokeredSelect: function(locations, selected) { + view.selectLocations(locations, selected, true); + }, + brokeredClear: function() { + view.selection.clear(); + } + }; + view.selection = new Selection(selectionHandler); + broker.addSelectionHandler(selectionHandler); + } + + setPatterns(patterns) { + let view = this; + view.patterns = patterns; + } + + clearText() { + let view = this; + while (view.textListNode.firstChild) { + view.textListNode.removeChild(view.textListNode.firstChild); + } + } + + sameLocation(l1, l2) { + let view = this; + if (l1.block_id != undefined && l2.block_id != undefined && + l1.block_id == l2.block_id && l1.node_id === undefined) { + return true; + } + + if (l1.address != undefined && l1.address == l2.address) { + return true; + } + + let node1 = l1.node_id; + let node2 = l2.node_id; + + if (node1 === undefined || node2 == undefined) { + if (l1.pos_start === undefined || l2.pos_start == undefined) { + return false; + } + if (l1.pos_start == -1 || l2.pos_start == -1) { + return false; + } + if (l1.pos_start < l2.pos_start) { + return l1.pos_end > l2.pos_start; + } { + 
return l1.pos_start < l2.pos_end; + } + } + + return l1.node_id == l2.node_id; + } + + selectLocations(locations, selected, makeVisible) { + let view = this; + let s = new Set(); + for (let l of locations) { + for (let i = 0; i < view.textListNode.children.length; ++i) { + let child = view.textListNode.children[i]; + if (child.location != undefined && view.sameLocation(l, child.location)) { + s.add(child); + } + } + } + view.selectCommon(s, selected, makeVisible); + } + + getLocations(items) { + let result = []; + let lastObject = null; + for (let i of items) { + if (i.location) { + result.push(i.location); + } + } + return result; + } + + createFragment(text, style) { + let view = this; + let span = document.createElement("SPAN"); + span.onmousedown = function(e) { + view.mouseDownSpan(span, e); + } + if (style != undefined) { + span.classList.add(style); + } + span.innerHTML = text; + return span; + } + + appendFragment(li, fragment) { + li.appendChild(fragment); + } + + processLine(line) { + let view = this; + let result = []; + let patternSet = 0; + while (true) { + let beforeLine = line; + for (let pattern of view.patterns[patternSet]) { + let matches = line.match(pattern[0]); + if (matches != null) { + if (matches[0] != '') { + let style = pattern[1] != null ? 
pattern[1] : {}; + let text = matches[0]; + if (text != '') { + let fragment = view.createFragment(matches[0], style.css); + if (style.link) { + fragment.classList.add('linkable-text'); + fragment.link = style.link; + } + result.push(fragment); + if (style.location != undefined) { + let location = style.location(text); + if (location != undefined) { + fragment.location = location; + } + } + } + line = line.substr(matches[0].length); + } + let nextPatternSet = patternSet; + if (pattern.length > 2) { + nextPatternSet = pattern[2]; + } + if (line == "") { + if (nextPatternSet != -1) { + throw("illegal parsing state in text-view in patternSet" + patternSet); + } + return result; + } + patternSet = nextPatternSet; + break; + } + } + if (beforeLine == line) { + throw("input not consumed in text-view in patternSet" + patternSet); + } + } + } + + select(s, selected, makeVisible) { + let view = this; + view.selection.clear(); + view.selectCommon(s, selected, makeVisible); + } + + selectCommon(s, selected, makeVisible) { + let view = this; + let firstSelect = makeVisible && view.selection.isEmpty(); + if ((typeof s) === 'function') { + for (let i = 0; i < view.textListNode.children.length; ++i) { + let child = view.textListNode.children[i]; + if (child.location && s(child.location)) { + if (firstSelect) { + makeContainerPosVisible(view.parentNode, child.offsetTop); + firstSelect = false; + } + view.selection.select(child, selected); + } + } + } else if (typeof s[Symbol.iterator] === 'function') { + if (firstSelect) { + for (let i of s) { + makeContainerPosVisible(view.parentNode, i.offsetTop); + break; + } + } + view.selection.select(s, selected); + } else { + if (firstSelect) { + makeContainerPosVisible(view.parentNode, s.offsetTop); + } + view.selection.select(s, selected); + } + } + + mouseDownLine(li, e) { + let view = this; + e.stopPropagation(); + if (!e.shiftKey) { + view.selection.clear(); + } + if (li.location != undefined) { + view.selectLocations([li.location], 
true, false); + } + } + + mouseDownSpan(span, e) { + let view = this; + if (view.allowSpanSelection) { + e.stopPropagation(); + if (!e.shiftKey) { + view.selection.clear(); + } + select(li, true); + } else if (span.link) { + span.link(span.textContent); + e.stopPropagation(); + } + } + + processText(text) { + let view = this; + let textLines = text.split(/[\n]/); + let lineNo = 0; + for (let line of textLines) { + let li = document.createElement("LI"); + li.onmousedown = function(e) { + view.mouseDownLine(li, e); + } + li.className = "nolinenums"; + li.lineNo = lineNo++; + let fragments = view.processLine(line); + for (let fragment of fragments) { + view.appendFragment(li, fragment); + } + let lineLocation = view.lineLocation(li); + if (lineLocation != undefined) { + li.location = lineLocation; + } + view.textListNode.appendChild(li); + } + } + + initializeContent(data, rememberedSelection) { + let view = this; + view.selection.clear(); + view.clearText(); + view.processText(data); + var fillerSize = document.documentElement.clientHeight - + view.textListNode.clientHeight; + if (fillerSize < 0) { + fillerSize = 0; + } + view.fillerSvgElement.attr("height", fillerSize); + } + + deleteContent() { + } + + isScrollable() { + return true; + } + + detachSelection() { + return null; + } + + lineLocation(li) { + let view = this; + for (let i = 0; i < li.children.length; ++i) { + let fragment = li.children[i]; + if (fragment.location != undefined && !view.allowSpanSelection) { + return fragment.location; + } + } + } +} diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css new file mode 100644 index 00000000000000..69a6ccabb54dd1 --- /dev/null +++ b/deps/v8/tools/turbolizer/turbo-visualizer.css @@ -0,0 +1,340 @@ +.visible-transition { + transition-delay: 0s; + transition-duration: 1s; + transition-property: all; + transition-timing-function: ease; +} + +.collapse-pane { + background: #A0A0A0; + bottom: 0; + position: 
absolute; + margin-bottom: 0.5em; + margin-right: 0.5em; + margin-left: 0.5em; + border-radius: 5px; + padding: 0.5em; + z-index: 5; + opacity: 0.7; + cursor: pointer; +} + +.search-input { + vertical-align: middle; + width: 145px; + opacity: 1; +} + +.button-input { + vertical-align: middle; + width: 24px; + opacity: 0.4; + cursor: pointer; +} + +.button-input-toggled { + border-radius: 5px; + background-color: #505050; +} + +.button-input:focus { + outline: none; +} + +.button-input-invisible { + vertical-align: middle; + width: 0px; + visibility: hidden; +} + + +.selected { + background-color: #FFFF33; +} + +.prettyprint ol.linenums > li { + list-style-type: decimal; + !important +} + +body { + margin: 0; + padding: 0; + overflow:hidden; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +p { + text-align: center; + overflow: overlay; + position: relative; +} + +marker { + fill: #080808; +} + +g rect { + fill: #F0F0F0; + stroke: #080808; + stroke-width: 2px; +} + +g.dead { + opacity: .5; +} + +g.unsorted rect { + opacity: 0.5; +} + +div.scrollable { + overflow-y: _croll; overflow-x: hidden; +} + +g.control rect { + fill: #EFCC00; + stroke: #080808; + stroke-width: 5px; +} + +g.javascript rect { + fill: #DD7E6B; +} + +g.simplified rect { + fill: #3C78D8; +} + +g.machine rect { + fill: #6AA84F; +} + +g.input rect { + fill: #CFE2F3; +} + +g.selected rect { + fill: #FFFF33; +} + +circle.bubbleStyle { + fill: #080808; + fill-opacity: 0.0; + stroke: #080808; + stroke-width: 2px; +} + +circle.bubbleStyle:hover { + stroke-width: 3px; +} + +circle.filledBubbleStyle { + fill: #080808; + stroke: #080808; + stroke-width: 2px; +} + +circle.filledBubbleStyle:hover { + fill: #080808; + stroke-width: 3px; +} + +circle.halfFilledBubbleStyle { + fill: #808080; + stroke: #101010; + stroke-width: 2px; +} + +circle.halfFilledBubbleStyle:hover { + fill: #808080; + 
stroke-width: 3px; +} + +path.effect { + fill: none; + stroke: #080808; + stroke-width: 4px; + cursor: default; +} + +path.effect:hover { + stroke-width: 6px; +} + +path.control { + fill: none; + stroke: #080808; + stroke-width: 4px; + cursor: default; +} + +path.control:hover { + stroke-width: 6px; +} + +path.value { + fill: none; + stroke: #888888; + stroke-width: 4px; + cursor: default; +} + +path.value:hover { + stroke-width: 6px; +} + +path.frame-state { + fill: none; + stroke: #080808; + stroke-width: 4px; + cursor: default; +} + +path.frame-state:hover{ + stroke-width: 6px; +} + +path.hidden { + fill: none; + stroke-width: 0; +} + +path.link.selected { + stroke: #FFFF33; +} + +pre.prettyprint { + border: none !important; + padding: 0px; +} + +li.L1, +li.L3, +li.L5, +li.L7, +li.L9 { + background: none !important +} + +li.nolinenums { + list-style-type:none; +} + +ul.noindent { + -webkit-padding-start: 0px; + -webkit-margin-before: 0px; + -webkit-margin-after: 0px; +} + +input:hover, .collapse-pane:hover input { + opacity: 1; + cursor: pointer; +} + +span.linkable-text { + text-decoration: underline; +} + +span.linkable-text:hover { + cursor: pointer; + font-weight: bold; +} + +#left { + float: left; height: 100%; background-color: #FFFFFF; + -webkit-transition: all 1s ease-in-out; + -moz-transition: all 1s ease-in-out; + -o-transition: all 1s ease-in-out; + transition: all .3s ease-in-out; + transition-property: width; +} + +#middle { + float:left; height: 100%; background-color: #F8F8F8; + -webkit-transition: all 1s ease-in-out; + -moz-transition: all 1s ease-in-out; + -o-transition: all 1s ease-in-out; + transition: all .3s ease-in-out; + transition-property: width; +} + +#right { + float: right; background-color: #FFFFFF; + -webkit-transition: all 1s ease-in-out; + -moz-transition: all 1s ease-in-out; + -o-transition: all 1s ease-in-out; + transition: all .3s ease-in-out; + transition-property: width; +} + +#disassembly-collapse { + right: 0; +} + 
+#source-collapse { + left: 0; +} + +#graph-toolbox-anchor { + height: 0px; +} + +#graph-toolbox { + position: relative; + top: 1em; + left: 0.7em; + border: 2px solid #eee8d5; + border-radius: 5px; + padding: 0.7em; + z-index: 5; + background: rgba(100%, 100%, 100%, 0.7); +} + +#disassembly-toolbox { + position: relative; + top: 1em; + left: 0.7em; + border: 2px solid #eee8d5; + border-radius: 5px; + padding: 0.7em; + z-index: 5; +} + +#load-file { + position: absolute; + top: 0; + right: 0; + margin-top: 0.5em; + margin-right: 0.5em; + z-index: 5; + opacity: 0.7; +} + +#load-file input { + background: #A0A0A0; + border-radius: 5px; + padding: 0.5em; +} + +#hidden-file-upload { + display: none; +} + +.prof { + cursor: default; +} + +tspan { + font-size: 500%; + font-family: sans-serif; +} + +text { + dominant-baseline: text-before-edge; +} \ No newline at end of file diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js new file mode 100644 index 00000000000000..280caf01db5057 --- /dev/null +++ b/deps/v8/tools/turbolizer/turbo-visualizer.js @@ -0,0 +1,246 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +document.onload = (function(d3){ + "use strict"; + var jsonObj; + var sourceExpandClassList = document.getElementById(SOURCE_EXPAND_ID).classList; + var sourceCollapseClassList = document.getElementById(SOURCE_COLLAPSE_ID).classList; + var sourceExpanded = sourceCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE); + var disassemblyExpandClassList = document.getElementById(DISASSEMBLY_EXPAND_ID).classList; + var disassemblyCollapseClassList = document.getElementById(DISASSEMBLY_COLLAPSE_ID).classList; + var disassemblyExpanded = disassemblyCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE); + var svg = null; + var graph = null; + var schedule = null; + var empty = null; + var currentPhaseView = null; + var disassemblyView = null; + var sourceView = null; + var selectionBroker = null; + + function updatePanes() { + if (sourceExpanded) { + if (disassemblyExpanded) { + d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "30%"); + d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "40%"); + d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "30%"); + } else { + d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "50%"); + d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%"); + d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%"); + } + } else { + if (disassemblyExpanded) { + d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%"); + d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%"); + d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "50%"); + } else { + d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%"); + d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "100%"); + d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%"); + } + } + } + + function getLastExpandedState(type, default_state) { + var state = window.sessionStorage.getItem("expandedState-"+type); + if (state === null) return default_state; + return state === 'true'; + } + + function setLastExpandedState(type, state) { + window.sessionStorage.setItem("expandedState-"+type, state); + } + + function 
toggleSourceExpanded() { + setSourceExpanded(!sourceExpanded); + } + + function setSourceExpanded(newState) { + sourceExpanded = newState; + setLastExpandedState("source", newState); + updatePanes(); + if (newState) { + sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE); + sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE); + sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE); + sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE); + } else { + sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE); + sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE); + sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE); + sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE); + } + } + + function toggleDisassemblyExpanded() { + setDisassemblyExpanded(!disassemblyExpanded); + } + + function setDisassemblyExpanded(newState) { + disassemblyExpanded = newState; + setLastExpandedState("disassembly", newState); + updatePanes(); + if (newState) { + disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE); + disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE); + disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE); + disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE); + } else { + disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE); + disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE); + disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE); + disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE); + } + } + + function hideCurrentPhase() { + var rememberedSelection = null; + if (currentPhaseView != null) { + rememberedSelection = currentPhaseView.detachSelection(); + currentPhaseView.hide(); + currentPhaseView = null; + } + return rememberedSelection; + } + + function displayPhaseView(view, data) { + var rememberedSelection = hideCurrentPhase(); + view.show(data, rememberedSelection); + d3.select("#middle").classed("scrollable", 
view.isScrollable()); + currentPhaseView = view; + } + + function displayPhase(phase) { + if (phase.type == 'graph') { + displayPhaseView(graph, phase.data); + } else if (phase.type == 'schedule') { + displayPhaseView(schedule, phase.data); + } else { + displayPhaseView(empty, null); + } + } + + function fitPanesToParents() { + d3.select("#left").classed("scrollable", false) + d3.select("#right").classed("scrollable", false); + + graph.fitGraphViewToWindow(); + disassemblyView.resizeToParent(); + sourceView.resizeToParent(); + + d3.select("#left").classed("scrollable", true); + d3.select("#right").classed("scrollable", true); + } + + selectionBroker = new SelectionBroker(); + + function initializeHandlers(g) { + d3.select("#source-collapse").on("click", function(){ + toggleSourceExpanded(true); + setTimeout(function(){ + g.fitGraphViewToWindow(); + }, 300); + }); + d3.select("#disassembly-collapse").on("click", function(){ + toggleDisassemblyExpanded(); + setTimeout(function(){ + g.fitGraphViewToWindow(); + }, 300); + }); + window.onresize = function(){ + fitPanesToParents(); + }; + d3.select("#hidden-file-upload").on("change", function() { + if (window.File && window.FileReader && window.FileList) { + var uploadFile = this.files[0]; + var filereader = new window.FileReader(); + var consts = Node.consts; + filereader.onload = function(){ + var txtRes = filereader.result; + // If the JSON isn't properly terminated, assume compiler crashed and + // add best-guess empty termination + if (txtRes[txtRes.length-2] == ',') { + txtRes += '{"name":"disassembly","type":"disassembly","data":""}]}'; + } + try{ + jsonObj = JSON.parse(txtRes); + + hideCurrentPhase(); + + selectionBroker.setNodePositionMap(jsonObj.nodePositions); + + sourceView.initializeCode(jsonObj.source, jsonObj.sourcePosition); + disassemblyView.initializeCode(jsonObj.source); + + var selectMenu = document.getElementById('display-selector'); + var disassemblyPhase = null; + selectMenu.innerHTML = ''; + for 
(var i = 0; i < jsonObj.phases.length; ++i) { + var optionElement = document.createElement("option"); + optionElement.text = jsonObj.phases[i].name; + if (optionElement.text == 'disassembly') { + disassemblyPhase = jsonObj.phases[i]; + } else { + selectMenu.add(optionElement, null); + } + } + + disassemblyView.initializePerfProfile(jsonObj.eventCounts); + disassemblyView.show(disassemblyPhase.data, null); + + var initialPhaseIndex = +window.sessionStorage.getItem("lastSelectedPhase"); + if (!(initialPhaseIndex in jsonObj.phases)) { + initialPhaseIndex = 0; + } + + // We wish to show the remembered phase {lastSelectedPhase}, but + // this will crash if the first view we switch to is a + // ScheduleView. So we first switch to the first phase, which + // should never be a ScheduleView. + displayPhase(jsonObj.phases[0]); + displayPhase(jsonObj.phases[initialPhaseIndex]); + selectMenu.selectedIndex = initialPhaseIndex; + + selectMenu.onchange = function(item) { + window.sessionStorage.setItem("lastSelectedPhase", selectMenu.selectedIndex); + displayPhase(jsonObj.phases[selectMenu.selectedIndex]); + } + + fitPanesToParents(); + + d3.select("#search-input").attr("value", window.sessionStorage.getItem("lastSearch") || ""); + + } + catch(err) { + window.console.log("caught exception, clearing session storage just in case"); + window.sessionStorage.clear(); // just in case + window.console.log("showing error"); + window.alert("Invalid TurboFan JSON file\n" + + "error: " + err.message); + return; + } + }; + filereader.readAsText(uploadFile); + } else { + alert("Can't load graph"); + } + }); + } + + sourceView = new CodeView(SOURCE_PANE_ID, PR, "", 0, selectionBroker); + disassemblyView = new DisassemblyView(DISASSEMBLY_PANE_ID, selectionBroker); + graph = new GraphView(d3, GRAPH_PANE_ID, [], [], selectionBroker); + schedule = new ScheduleView(SCHEDULE_PANE_ID, selectionBroker); + empty = new EmptyView(EMPTY_PANE_ID, selectionBroker); + + initializeHandlers(graph); + + 
setSourceExpanded(getLastExpandedState("source", true)); + setDisassemblyExpanded(getLastExpandedState("disassembly", false)); + + displayPhaseView(empty, null); + fitPanesToParents(); +})(window.d3); diff --git a/deps/v8/tools/turbolizer/types.png b/deps/v8/tools/turbolizer/types.png new file mode 100644 index 00000000000000..8fead8f079da0c Binary files /dev/null and b/deps/v8/tools/turbolizer/types.png differ diff --git a/deps/v8/tools/turbolizer/upload-icon.png b/deps/v8/tools/turbolizer/upload-icon.png new file mode 100644 index 00000000000000..c1a289b76daca2 Binary files /dev/null and b/deps/v8/tools/turbolizer/upload-icon.png differ diff --git a/deps/v8/tools/turbolizer/util.js b/deps/v8/tools/turbolizer/util.js new file mode 100644 index 00000000000000..282221af483355 --- /dev/null +++ b/deps/v8/tools/turbolizer/util.js @@ -0,0 +1,80 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; + +function makeContainerPosVisible(container, pos) { + var height = container.offsetHeight; + var margin = Math.floor(height / 4); + if (pos < container.scrollTop + margin) { + pos -= margin; + if (pos < 0) pos = 0; + container.scrollTop = pos; + return; + } + if (pos > (container.scrollTop + 3 * margin)) { + pos = pos - 3 * margin; + if (pos < 0) pos = 0; + container.scrollTop = pos; + } +} + + +function lowerBound(a, value, compare, lookup) { + let first = 0; + let count = a.length; + while (count > 0) { + let step = Math.floor(count / 2); + let middle = first + step; + let middle_value = (lookup === undefined) ? a[middle] : lookup(a, middle); + let result = (compare === undefined) ? 
(middle_value < value) : compare(middle_value, value); + if (result) { + first = middle + 1; + count -= step + 1; + } else { + count = step; + } + } + return first; +} + + +function upperBound(a, value, compare, lookup) { + let first = 0; + let count = a.length; + while (count > 0) { + let step = Math.floor(count / 2); + let middle = first + step; + let middle_value = (lookup === undefined) ? a[middle] : lookup(a, middle); + let result = (compare === undefined) ? (value < middle_value) : compare(value, middle_value); + if (!result) { + first = middle + 1; + count -= step + 1; + } else { + count = step; + } + } + return first; +} + + +function sortUnique(arr, f) { + arr = arr.sort(f); + let ret = [arr[0]]; + for (var i = 1; i < arr.length; i++) { + if (arr[i-1] !== arr[i]) { + ret.push(arr[i]); + } + } + return ret; +} + +// Partial application without binding the receiver +function partial(f) { + var arguments1 = Array.prototype.slice.call(arguments, 1); + return function() { + var arguments2 = Array.from(arguments); + f.apply(this, arguments1.concat(arguments2)); + } +} diff --git a/deps/v8/tools/turbolizer/view.js b/deps/v8/tools/turbolizer/view.js new file mode 100644 index 00000000000000..1ce1056a7fe50e --- /dev/null +++ b/deps/v8/tools/turbolizer/view.js @@ -0,0 +1,45 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +"use strict"; + +class View { + constructor(id, broker) { + this.divElement = d3.select("#" + id); + this.divNode = this.divElement[0][0]; + this.parentNode = this.divNode.parentNode; + } + + isScrollable() { + return false; + } + + show(data, rememberedSelection) { + this.parentNode.appendChild(this.divElement[0][0]); + this.initializeContent(data, rememberedSelection); + this.resizeToParent(); + this.divElement.attr(VISIBILITY, 'visible'); + } + + resizeToParent() { + var view = this; + var documentElement = document.documentElement; + var y; + if (this.parentNode.clientHeight) + y = Math.max(this.parentNode.clientHeight, documentElement.clientHeight); + else + y = documentElement.clientHeight; + this.parentNode.style.height = y + 'px'; + } + + hide() { + this.divElement.attr(VISIBILITY, 'hidden'); + this.deleteContent(); + this.parentNode.removeChild(this.divNode); + } + + detachSelection() { + return null; + } +} diff --git a/deps/v8/tools/update-wasm-fuzzers.sh b/deps/v8/tools/update-wasm-fuzzers.sh new file mode 100755 index 00000000000000..3652829c8d84fd --- /dev/null +++ b/deps/v8/tools/update-wasm-fuzzers.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Copyright 2016 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +set -e + +TOOLS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +cd ${TOOLS_DIR}/.. 
+ +rm -rf test/fuzzer/wasm +rm -rf test/fuzzer/wasm_asmjs + +make x64.debug -j + +mkdir -p test/fuzzer/wasm +mkdir -p test/fuzzer/wasm_asmjs + +# asm.js +./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \ + --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \ + --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/wasm/asm* +./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \ + --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \ + --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/asm/* +./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \ + --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \ + --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/regress/asm/* +# WASM +./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \ + --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \ + --dump-wasm-module-path=./test/fuzzer/wasm/" unittests +./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \ + --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \ + --dump-wasm-module-path=./test/fuzzer/wasm/" mjsunit/wasm/* +./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \ + --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \ + --dump-wasm-module-path=./test/fuzzer/wasm/" \ + $(cd test/; ls cctest/wasm/test-*.cc | \ + sed -es/wasm\\///g | sed -es/[.]cc/\\/\\*/g) + +# Delete items over 20k. +for x in $(find ./test/fuzzer/wasm/ -type f -size +20k) +do + rm $x +done +for x in $(find ./test/fuzzer/wasm_asmjs/ -type f -size +20k) +do + rm $x +done + +# Upload changes. 
+cd test/fuzzer +upload_to_google_storage.py -a -b v8-wasm-fuzzer wasm +upload_to_google_storage.py -a -b v8-wasm-asmjs-fuzzer wasm_asmjs diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py index 39cfeb1707322b..0ff0cf32b0ef4c 100644 --- a/deps/v8/tools/v8heapconst.py +++ b/deps/v8/tools/v8heapconst.py @@ -55,8 +55,8 @@ 132: "MAP_TYPE", 133: "CODE_TYPE", 131: "ODDBALL_TYPE", - 171: "CELL_TYPE", - 174: "PROPERTY_CELL_TYPE", + 169: "CELL_TYPE", + 172: "PROPERTY_CELL_TYPE", 129: "HEAP_NUMBER_TYPE", 134: "MUTABLE_HEAP_NUMBER_TYPE", 135: "FOREIGN_TYPE", @@ -85,210 +85,217 @@ 160: "ALLOCATION_MEMENTO_TYPE", 159: "ALLOCATION_SITE_TYPE", 161: "SCRIPT_TYPE", - 162: "CODE_CACHE_TYPE", - 163: "POLYMORPHIC_CODE_CACHE_TYPE", - 164: "TYPE_FEEDBACK_INFO_TYPE", - 165: "ALIASED_ARGUMENTS_ENTRY_TYPE", - 166: "BOX_TYPE", - 175: "PROTOTYPE_INFO_TYPE", - 176: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE", - 169: "FIXED_ARRAY_TYPE", + 162: "TYPE_FEEDBACK_INFO_TYPE", + 163: "ALIASED_ARGUMENTS_ENTRY_TYPE", + 164: "BOX_TYPE", + 173: "PROTOTYPE_INFO_TYPE", + 174: "CONTEXT_EXTENSION_TYPE", + 167: "FIXED_ARRAY_TYPE", 148: "FIXED_DOUBLE_ARRAY_TYPE", - 170: "SHARED_FUNCTION_INFO_TYPE", - 172: "WEAK_CELL_TYPE", - 173: "TRANSITION_ARRAY_TYPE", - 179: "JS_MESSAGE_OBJECT_TYPE", - 178: "JS_VALUE_TYPE", - 180: "JS_DATE_TYPE", - 181: "JS_OBJECT_TYPE", - 182: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", - 183: "JS_GENERATOR_OBJECT_TYPE", - 184: "JS_MODULE_TYPE", - 185: "JS_GLOBAL_OBJECT_TYPE", - 186: "JS_GLOBAL_PROXY_TYPE", - 187: "JS_ARRAY_TYPE", - 188: "JS_ARRAY_BUFFER_TYPE", - 189: "JS_TYPED_ARRAY_TYPE", - 190: "JS_DATA_VIEW_TYPE", - 177: "JS_PROXY_TYPE", - 191: "JS_SET_TYPE", - 192: "JS_MAP_TYPE", - 193: "JS_SET_ITERATOR_TYPE", - 194: "JS_MAP_ITERATOR_TYPE", - 195: "JS_ITERATOR_RESULT_TYPE", + 168: "SHARED_FUNCTION_INFO_TYPE", + 170: "WEAK_CELL_TYPE", + 171: "TRANSITION_ARRAY_TYPE", + 180: "JS_MESSAGE_OBJECT_TYPE", + 179: "JS_VALUE_TYPE", + 181: "JS_DATE_TYPE", + 183: 
"JS_OBJECT_TYPE", + 184: "JS_ARGUMENTS_TYPE", + 185: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 186: "JS_GENERATOR_OBJECT_TYPE", + 187: "JS_MODULE_TYPE", + 176: "JS_GLOBAL_OBJECT_TYPE", + 177: "JS_GLOBAL_PROXY_TYPE", + 182: "JS_API_OBJECT_TYPE", + 178: "JS_SPECIAL_API_OBJECT_TYPE", + 188: "JS_ARRAY_TYPE", + 189: "JS_ARRAY_BUFFER_TYPE", + 190: "JS_TYPED_ARRAY_TYPE", + 191: "JS_DATA_VIEW_TYPE", + 175: "JS_PROXY_TYPE", + 192: "JS_SET_TYPE", + 193: "JS_MAP_TYPE", + 194: "JS_SET_ITERATOR_TYPE", + 195: "JS_MAP_ITERATOR_TYPE", 196: "JS_WEAK_MAP_TYPE", 197: "JS_WEAK_SET_TYPE", 198: "JS_PROMISE_TYPE", 199: "JS_REGEXP_TYPE", - 200: "JS_BOUND_FUNCTION_TYPE", - 201: "JS_FUNCTION_TYPE", - 167: "DEBUG_INFO_TYPE", - 168: "BREAK_POINT_INFO_TYPE", + 200: "JS_ERROR_TYPE", + 201: "JS_BOUND_FUNCTION_TYPE", + 202: "JS_FUNCTION_TYPE", + 165: "DEBUG_INFO_TYPE", + 166: "BREAK_POINT_INFO_TYPE", } # List of known V8 maps. KNOWN_MAPS = { - 0x08081: (136, "ByteArrayMap"), - 0x080ad: (132, "MetaMap"), - 0x080d9: (131, "NullMap"), - 0x08105: (169, "FixedArrayMap"), - 0x08131: (4, "OneByteInternalizedStringMap"), - 0x0815d: (138, "FreeSpaceMap"), - 0x08189: (149, "OnePointerFillerMap"), - 0x081b5: (149, "TwoPointerFillerMap"), - 0x081e1: (131, "UndefinedMap"), - 0x0820d: (129, "HeapNumberMap"), - 0x08239: (131, "TheHoleMap"), - 0x08265: (131, "BooleanMap"), - 0x08291: (131, "UninitializedMap"), - 0x082bd: (171, "CellMap"), - 0x082e9: (174, "GlobalPropertyCellMap"), - 0x08315: (170, "SharedFunctionInfoMap"), - 0x08341: (134, "MutableHeapNumberMap"), - 0x0836d: (130, "Float32x4Map"), - 0x08399: (130, "Int32x4Map"), - 0x083c5: (130, "Uint32x4Map"), - 0x083f1: (130, "Bool32x4Map"), - 0x0841d: (130, "Int16x8Map"), - 0x08449: (130, "Uint16x8Map"), - 0x08475: (130, "Bool16x8Map"), - 0x084a1: (130, "Int8x16Map"), - 0x084cd: (130, "Uint8x16Map"), - 0x084f9: (130, "Bool8x16Map"), - 0x08525: (169, "NativeContextMap"), - 0x08551: (133, "CodeMap"), - 0x0857d: (169, "ScopeInfoMap"), - 0x085a9: (169, 
"FixedCOWArrayMap"), - 0x085d5: (148, "FixedDoubleArrayMap"), - 0x08601: (172, "WeakCellMap"), - 0x0862d: (173, "TransitionArrayMap"), - 0x08659: (68, "OneByteStringMap"), - 0x08685: (169, "FunctionContextMap"), - 0x086b1: (131, "NoInterceptorResultSentinelMap"), - 0x086dd: (131, "ArgumentsMarkerMap"), - 0x08709: (131, "ExceptionMap"), - 0x08735: (131, "TerminationExceptionMap"), - 0x08761: (169, "HashTableMap"), - 0x0878d: (169, "OrderedHashTableMap"), - 0x087b9: (128, "SymbolMap"), - 0x087e5: (64, "StringMap"), - 0x08811: (69, "ConsOneByteStringMap"), - 0x0883d: (65, "ConsStringMap"), - 0x08869: (67, "SlicedStringMap"), - 0x08895: (71, "SlicedOneByteStringMap"), - 0x088c1: (66, "ExternalStringMap"), - 0x088ed: (74, "ExternalStringWithOneByteDataMap"), - 0x08919: (70, "ExternalOneByteStringMap"), - 0x08945: (70, "NativeSourceStringMap"), - 0x08971: (82, "ShortExternalStringMap"), - 0x0899d: (90, "ShortExternalStringWithOneByteDataMap"), - 0x089c9: (0, "InternalizedStringMap"), - 0x089f5: (2, "ExternalInternalizedStringMap"), - 0x08a21: (10, "ExternalInternalizedStringWithOneByteDataMap"), - 0x08a4d: (6, "ExternalOneByteInternalizedStringMap"), - 0x08a79: (18, "ShortExternalInternalizedStringMap"), - 0x08aa5: (26, "ShortExternalInternalizedStringWithOneByteDataMap"), - 0x08ad1: (22, "ShortExternalOneByteInternalizedStringMap"), - 0x08afd: (86, "ShortExternalOneByteStringMap"), - 0x08b29: (140, "FixedUint8ArrayMap"), - 0x08b55: (139, "FixedInt8ArrayMap"), - 0x08b81: (142, "FixedUint16ArrayMap"), - 0x08bad: (141, "FixedInt16ArrayMap"), - 0x08bd9: (144, "FixedUint32ArrayMap"), - 0x08c05: (143, "FixedInt32ArrayMap"), - 0x08c31: (145, "FixedFloat32ArrayMap"), - 0x08c5d: (146, "FixedFloat64ArrayMap"), - 0x08c89: (147, "FixedUint8ClampedArrayMap"), - 0x08cb5: (169, "SloppyArgumentsElementsMap"), - 0x08ce1: (169, "CatchContextMap"), - 0x08d0d: (169, "WithContextMap"), - 0x08d39: (169, "BlockContextMap"), - 0x08d65: (169, "ModuleContextMap"), - 0x08d91: (169, 
"ScriptContextMap"), - 0x08dbd: (169, "ScriptContextTableMap"), - 0x08de9: (179, "JSMessageObjectMap"), - 0x08e15: (135, "ForeignMap"), - 0x08e41: (181, "NeanderMap"), - 0x08e6d: (181, "ExternalMap"), - 0x08e99: (160, "AllocationMementoMap"), - 0x08ec5: (159, "AllocationSiteMap"), - 0x08ef1: (163, "PolymorphicCodeCacheMap"), - 0x08f1d: (161, "ScriptMap"), - 0x08f75: (137, "BytecodeArrayMap"), - 0x08fa1: (166, "BoxMap"), - 0x08fcd: (150, "AccessorInfoMap"), - 0x08ff9: (151, "AccessorPairMap"), - 0x09025: (152, "AccessCheckInfoMap"), - 0x09051: (153, "InterceptorInfoMap"), - 0x0907d: (154, "CallHandlerInfoMap"), - 0x090a9: (155, "FunctionTemplateInfoMap"), - 0x090d5: (156, "ObjectTemplateInfoMap"), - 0x09101: (162, "CodeCacheMap"), - 0x0912d: (164, "TypeFeedbackInfoMap"), - 0x09159: (165, "AliasedArgumentsEntryMap"), - 0x09185: (167, "DebugInfoMap"), - 0x091b1: (168, "BreakPointInfoMap"), - 0x091dd: (175, "PrototypeInfoMap"), - 0x09209: (176, "SloppyBlockWithEvalContextExtensionMap"), + 0x08101: (138, "FreeSpaceMap"), + 0x0812d: (132, "MetaMap"), + 0x08159: (131, "NullMap"), + 0x08185: (167, "FixedArrayMap"), + 0x081b1: (4, "OneByteInternalizedStringMap"), + 0x081dd: (149, "OnePointerFillerMap"), + 0x08209: (149, "TwoPointerFillerMap"), + 0x08235: (131, "UninitializedMap"), + 0x08261: (131, "UndefinedMap"), + 0x0828d: (129, "HeapNumberMap"), + 0x082b9: (131, "TheHoleMap"), + 0x082e5: (131, "BooleanMap"), + 0x08311: (136, "ByteArrayMap"), + 0x0833d: (167, "FixedCOWArrayMap"), + 0x08369: (167, "HashTableMap"), + 0x08395: (128, "SymbolMap"), + 0x083c1: (68, "OneByteStringMap"), + 0x083ed: (167, "ScopeInfoMap"), + 0x08419: (168, "SharedFunctionInfoMap"), + 0x08445: (133, "CodeMap"), + 0x08471: (167, "FunctionContextMap"), + 0x0849d: (169, "CellMap"), + 0x084c9: (170, "WeakCellMap"), + 0x084f5: (172, "GlobalPropertyCellMap"), + 0x08521: (135, "ForeignMap"), + 0x0854d: (171, "TransitionArrayMap"), + 0x08579: (131, "NoInterceptorResultSentinelMap"), + 0x085a5: (131, 
"ArgumentsMarkerMap"), + 0x085d1: (167, "NativeContextMap"), + 0x085fd: (167, "ModuleContextMap"), + 0x08629: (167, "ScriptContextMap"), + 0x08655: (167, "BlockContextMap"), + 0x08681: (167, "CatchContextMap"), + 0x086ad: (167, "WithContextMap"), + 0x086d9: (148, "FixedDoubleArrayMap"), + 0x08705: (134, "MutableHeapNumberMap"), + 0x08731: (167, "OrderedHashTableMap"), + 0x0875d: (167, "SloppyArgumentsElementsMap"), + 0x08789: (180, "JSMessageObjectMap"), + 0x087b5: (183, "NeanderMap"), + 0x087e1: (137, "BytecodeArrayMap"), + 0x0880d: (64, "StringMap"), + 0x08839: (69, "ConsOneByteStringMap"), + 0x08865: (65, "ConsStringMap"), + 0x08891: (67, "SlicedStringMap"), + 0x088bd: (71, "SlicedOneByteStringMap"), + 0x088e9: (66, "ExternalStringMap"), + 0x08915: (74, "ExternalStringWithOneByteDataMap"), + 0x08941: (70, "ExternalOneByteStringMap"), + 0x0896d: (82, "ShortExternalStringMap"), + 0x08999: (90, "ShortExternalStringWithOneByteDataMap"), + 0x089c5: (0, "InternalizedStringMap"), + 0x089f1: (2, "ExternalInternalizedStringMap"), + 0x08a1d: (10, "ExternalInternalizedStringWithOneByteDataMap"), + 0x08a49: (6, "ExternalOneByteInternalizedStringMap"), + 0x08a75: (18, "ShortExternalInternalizedStringMap"), + 0x08aa1: (26, "ShortExternalInternalizedStringWithOneByteDataMap"), + 0x08acd: (22, "ShortExternalOneByteInternalizedStringMap"), + 0x08af9: (86, "ShortExternalOneByteStringMap"), + 0x08b25: (130, "Float32x4Map"), + 0x08b51: (130, "Int32x4Map"), + 0x08b7d: (130, "Uint32x4Map"), + 0x08ba9: (130, "Bool32x4Map"), + 0x08bd5: (130, "Int16x8Map"), + 0x08c01: (130, "Uint16x8Map"), + 0x08c2d: (130, "Bool16x8Map"), + 0x08c59: (130, "Int8x16Map"), + 0x08c85: (130, "Uint8x16Map"), + 0x08cb1: (130, "Bool8x16Map"), + 0x08cdd: (131, "ExceptionMap"), + 0x08d09: (131, "TerminationExceptionMap"), + 0x08d35: (131, "OptimizedOutMap"), + 0x08d61: (131, "StaleRegisterMap"), + 0x08d8d: (167, "DebugEvaluateContextMap"), + 0x08db9: (167, "ScriptContextTableMap"), + 0x08de5: (167, 
"UnseededNumberDictionaryMap"), + 0x08e11: (183, "ExternalMap"), + 0x08e3d: (86, "NativeSourceStringMap"), + 0x08e69: (140, "FixedUint8ArrayMap"), + 0x08e95: (139, "FixedInt8ArrayMap"), + 0x08ec1: (142, "FixedUint16ArrayMap"), + 0x08eed: (141, "FixedInt16ArrayMap"), + 0x08f19: (144, "FixedUint32ArrayMap"), + 0x08f45: (143, "FixedInt32ArrayMap"), + 0x08f71: (145, "FixedFloat32ArrayMap"), + 0x08f9d: (146, "FixedFloat64ArrayMap"), + 0x08fc9: (147, "FixedUint8ClampedArrayMap"), + 0x08ff5: (161, "ScriptMap"), + 0x09021: (159, "AllocationSiteMap"), + 0x0904d: (160, "AllocationMementoMap"), + 0x09079: (150, "AccessorInfoMap"), + 0x090a5: (164, "BoxMap"), + 0x090d1: (151, "AccessorPairMap"), + 0x090fd: (152, "AccessCheckInfoMap"), + 0x09129: (153, "InterceptorInfoMap"), + 0x09155: (154, "CallHandlerInfoMap"), + 0x09181: (155, "FunctionTemplateInfoMap"), + 0x091ad: (156, "ObjectTemplateInfoMap"), + 0x091d9: (162, "TypeFeedbackInfoMap"), + 0x09205: (163, "AliasedArgumentsEntryMap"), + 0x09231: (165, "DebugInfoMap"), + 0x0925d: (166, "BreakPointInfoMap"), + 0x09289: (173, "PrototypeInfoMap"), + 0x092b5: (174, "ContextExtensionMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { - ("OLD_SPACE", 0x08081): "NullValue", - ("OLD_SPACE", 0x08095): "EmptyDescriptorArray", - ("OLD_SPACE", 0x0809d): "EmptyFixedArray", - ("OLD_SPACE", 0x080c9): "UndefinedValue", - ("OLD_SPACE", 0x080f5): "NanValue", - ("OLD_SPACE", 0x08105): "TheHoleValue", - ("OLD_SPACE", 0x08129): "TrueValue", - ("OLD_SPACE", 0x08161): "FalseValue", - ("OLD_SPACE", 0x08189): "empty_string", - ("OLD_SPACE", 0x08195): "hidden_string", - ("OLD_SPACE", 0x081a1): "UninitializedValue", - ("OLD_SPACE", 0x081d1): "EmptyByteArray", - ("OLD_SPACE", 0x081d9): "NoInterceptorResultSentinel", - ("OLD_SPACE", 0x08219): "ArgumentsMarker", - ("OLD_SPACE", 0x08249): "Exception", - ("OLD_SPACE", 0x08275): "TerminationException", - ("OLD_SPACE", 0x082ad): "NumberStringCache", - ("OLD_SPACE", 0x08ab5): "SingleCharacterStringCache", - ("OLD_SPACE", 0x08f4d): "StringSplitCache", - ("OLD_SPACE", 0x09355): "RegExpMultipleCache", - ("OLD_SPACE", 0x0975d): "EmptyFixedUint8Array", - ("OLD_SPACE", 0x0976d): "EmptyFixedInt8Array", - ("OLD_SPACE", 0x0977d): "EmptyFixedUint16Array", - ("OLD_SPACE", 0x0978d): "EmptyFixedInt16Array", - ("OLD_SPACE", 0x0979d): "EmptyFixedUint32Array", - ("OLD_SPACE", 0x097ad): "EmptyFixedInt32Array", - ("OLD_SPACE", 0x097bd): "EmptyFixedFloat32Array", - ("OLD_SPACE", 0x097cd): "EmptyFixedFloat64Array", - ("OLD_SPACE", 0x097dd): "EmptyFixedUint8ClampedArray", - ("OLD_SPACE", 0x097ed): "InfinityValue", - ("OLD_SPACE", 0x097fd): "MinusZeroValue", - ("OLD_SPACE", 0x0980d): "MinusInfinityValue", - ("OLD_SPACE", 0x0981d): "MessageListeners", - ("OLD_SPACE", 0x09839): "CodeStubs", - ("OLD_SPACE", 0x0feb9): "DummyVector", - ("OLD_SPACE", 0x13fed): "NonMonomorphicCache", - ("OLD_SPACE", 0x14601): "PolymorphicCodeCache", - ("OLD_SPACE", 0x14609): "NativesSourceCache", - ("OLD_SPACE", 0x1488d): "ExperimentalNativesSourceCache", - ("OLD_SPACE", 0x148c1): "ExtraNativesSourceCache", - ("OLD_SPACE", 0x148e1): "ExperimentalExtraNativesSourceCache", - ("OLD_SPACE", 0x148ed): 
"EmptyScript", - ("OLD_SPACE", 0x1492d): "IntrinsicFunctionNames", - ("OLD_SPACE", 0x2e919): "EmptyPropertiesDictionary", - ("OLD_SPACE", 0x2e965): "UndefinedCell", - ("OLD_SPACE", 0x2e96d): "ObservationState", - ("OLD_SPACE", 0x2e979): "ScriptList", - ("OLD_SPACE", 0x2eb01): "ClearedOptimizedCodeMap", - ("OLD_SPACE", 0x2eb0d): "EmptyWeakCell", - ("OLD_SPACE", 0x534d1): "EmptySlowElementDictionary", - ("OLD_SPACE", 0x5351d): "WeakObjectToCodeTable", - ("OLD_SPACE", 0x53631): "ArrayProtector", - ("OLD_SPACE", 0x53641): "EmptyPropertyCell", - ("OLD_SPACE", 0x53651): "NoScriptSharedFunctionInfos", - ("OLD_SPACE", 0x59cf1): "StringTable", - ("CODE_SPACE", 0x1a001): "JsEntryCode", - ("CODE_SPACE", 0x1e721): "JsConstructEntryCode", + ("OLD_SPACE", 0x08101): "NullValue", + ("OLD_SPACE", 0x0811d): "EmptyDescriptorArray", + ("OLD_SPACE", 0x08125): "EmptyFixedArray", + ("OLD_SPACE", 0x08151): "UninitializedValue", + ("OLD_SPACE", 0x081a1): "UndefinedValue", + ("OLD_SPACE", 0x081bd): "NanValue", + ("OLD_SPACE", 0x081cd): "TheHoleValue", + ("OLD_SPACE", 0x081f9): "TrueValue", + ("OLD_SPACE", 0x08239): "FalseValue", + ("OLD_SPACE", 0x08269): "empty_string", + ("OLD_SPACE", 0x08275): "NoInterceptorResultSentinel", + ("OLD_SPACE", 0x082bd): "ArgumentsMarker", + ("OLD_SPACE", 0x082f5): "EmptyByteArray", + ("OLD_SPACE", 0x082fd): "EmptyWeakCell", + ("OLD_SPACE", 0x0830d): "InfinityValue", + ("OLD_SPACE", 0x0831d): "MinusZeroValue", + ("OLD_SPACE", 0x0832d): "MinusInfinityValue", + ("OLD_SPACE", 0x09961): "EmptyLiteralsArray", + ("OLD_SPACE", 0x0996d): "ClearedOptimizedCodeMap", + ("OLD_SPACE", 0x09979): "Exception", + ("OLD_SPACE", 0x099ad): "TerminationException", + ("OLD_SPACE", 0x099ed): "OptimizedOut", + ("OLD_SPACE", 0x09a25): "StaleRegister", + ("OLD_SPACE", 0x09a5d): "EmptyFixedUint8Array", + ("OLD_SPACE", 0x09a6d): "EmptyFixedInt8Array", + ("OLD_SPACE", 0x09a7d): "EmptyFixedUint16Array", + ("OLD_SPACE", 0x09a8d): "EmptyFixedInt16Array", + ("OLD_SPACE", 0x09a9d): 
"EmptyFixedUint32Array", + ("OLD_SPACE", 0x09aad): "EmptyFixedInt32Array", + ("OLD_SPACE", 0x09abd): "EmptyFixedFloat32Array", + ("OLD_SPACE", 0x09acd): "EmptyFixedFloat64Array", + ("OLD_SPACE", 0x09add): "EmptyFixedUint8ClampedArray", + ("OLD_SPACE", 0x09aed): "EmptyScript", + ("OLD_SPACE", 0x09b2d): "UndefinedCell", + ("OLD_SPACE", 0x09b35): "EmptySloppyArgumentsElements", + ("OLD_SPACE", 0x09b45): "EmptySlowElementDictionary", + ("OLD_SPACE", 0x09b91): "DummyVector", + ("OLD_SPACE", 0x09c09): "EmptyPropertyCell", + ("OLD_SPACE", 0x09c19): "ArrayProtector", + ("OLD_SPACE", 0x09c29): "IsConcatSpreadableProtector", + ("OLD_SPACE", 0x09c31): "HasInstanceProtector", + ("OLD_SPACE", 0x09c41): "SpeciesProtector", + ("OLD_SPACE", 0x09c49): "NumberStringCache", + ("OLD_SPACE", 0x0a451): "SingleCharacterStringCache", + ("OLD_SPACE", 0x0a909): "StringSplitCache", + ("OLD_SPACE", 0x0ad11): "RegExpMultipleCache", + ("OLD_SPACE", 0x0b119): "NativesSourceCache", + ("OLD_SPACE", 0x0b2e5): "ExperimentalNativesSourceCache", + ("OLD_SPACE", 0x0b309): "ExtraNativesSourceCache", + ("OLD_SPACE", 0x0b325): "ExperimentalExtraNativesSourceCache", + ("OLD_SPACE", 0x0b331): "IntrinsicFunctionNames", + ("OLD_SPACE", 0x244bd): "EmptyPropertiesDictionary", + ("OLD_SPACE", 0x24509): "ScriptList", + ("OLD_SPACE", 0x3fd85): "CodeStubs", + ("OLD_SPACE", 0x49285): "WeakObjectToCodeTable", + ("OLD_SPACE", 0x49399): "WeakNewSpaceObjectToCodeList", + ("OLD_SPACE", 0x493e1): "NoScriptSharedFunctionInfos", + ("OLD_SPACE", 0x50cf9): "MessageListeners", + ("OLD_SPACE", 0x5494d): "StringTable", + ("CODE_SPACE", 0x184a1): "JsConstructEntryCode", + ("CODE_SPACE", 0x23fe1): "JsEntryCode", } diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/tools/verify_source_deps.py index 50caace79c8e71..e62ec8f306a41b 100755 --- a/deps/v8/tools/verify_source_deps.py +++ b/deps/v8/tools/verify_source_deps.py @@ -8,45 +8,99 @@ .h and .cc files in the source tree and which files are included in the gyp and gn files. 
The latter inclusion is overapproximated. -TODO(machenbach): Gyp files in src will point to source files in src without a -src/ prefix. For simplicity, all paths relative to src are stripped. But this -tool won't be accurate for other sources in other directories (e.g. cctest). +TODO(machenbach): If two source files with the same name exist, but only one +is referenced from a gyp/gn file, we won't necessarily detect it. """ import itertools import re import os +import subprocess +import sys V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -V8_SRC_BASE = os.path.join(V8_BASE, 'src') -V8_INCLUDE_BASE = os.path.join(V8_BASE, 'include') GYP_FILES = [ os.path.join(V8_BASE, 'src', 'd8.gyp'), + os.path.join(V8_BASE, 'src', 'v8.gyp'), + os.path.join(V8_BASE, 'src', 'inspector', 'inspector.gypi'), os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'), + os.path.join(V8_BASE, 'samples', 'samples.gyp'), os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'), + os.path.join(V8_BASE, 'test', 'fuzzer', 'fuzzer.gyp'), os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'), - os.path.join(V8_BASE, 'tools', 'gyp', 'v8.gyp'), + os.path.join(V8_BASE, 'testing', 'gmock.gyp'), + os.path.join(V8_BASE, 'testing', 'gtest.gyp'), os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'), ] +ALL_GYP_PREFIXES = [ + '..', + 'common', + os.path.join('src', 'third_party', 'vtune'), + 'src', + 'samples', + 'testing', + 'tools', + os.path.join('test', 'cctest'), + os.path.join('test', 'common'), + os.path.join('test', 'fuzzer'), + os.path.join('test', 'unittests'), +] + +GYP_UNSUPPORTED_FEATURES = [ + 'gcmole', +] + +GN_FILES = [ + os.path.join(V8_BASE, 'BUILD.gn'), + os.path.join(V8_BASE, 'build', 'secondary', 'testing', 'gmock', 'BUILD.gn'), + os.path.join(V8_BASE, 'build', 'secondary', 'testing', 'gtest', 'BUILD.gn'), + os.path.join(V8_BASE, 'src', 'inspector', 'BUILD.gn'), + os.path.join(V8_BASE, 'test', 'cctest', 'BUILD.gn'), + os.path.join(V8_BASE, 'test', 
'unittests', 'BUILD.gn'), + os.path.join(V8_BASE, 'tools', 'BUILD.gn'), +] + +GN_UNSUPPORTED_FEATURES = [ + 'aix', + 'cygwin', + 'freebsd', + 'gcmole', + 'openbsd', + 'ppc', + 'qnx', + 'solaris', + 'vtune', + 'x87', +] -def path_no_prefix(path): - if path.startswith('../'): - return path_no_prefix(path[3:]) - elif path.startswith('src/'): - return path_no_prefix(path[4:]) - else: - return path +ALL_GN_PREFIXES = [ + '..', + os.path.join('src', 'inspector'), + 'src', + 'testing', + os.path.join('test', 'cctest'), + os.path.join('test', 'unittests'), +] + +def pathsplit(path): + return re.split('[/\\\\]', path) +def path_no_prefix(path, prefixes): + for prefix in prefixes: + if path.startswith(prefix + os.sep): + return path_no_prefix(path[len(prefix) + 1:], prefixes) + return path -def isources(directory): - for root, dirs, files in os.walk(directory): - for f in files: - if not (f.endswith('.h') or f.endswith('.cc')): - continue - yield path_no_prefix(os.path.relpath(os.path.join(root, f), V8_BASE)) + +def isources(prefixes): + cmd = ['git', 'ls-tree', '-r', 'HEAD', '--full-name', '--name-only'] + for f in subprocess.check_output(cmd, universal_newlines=True).split('\n'): + if not (f.endswith('.h') or f.endswith('.cc')): + continue + yield path_no_prefix(os.path.join(*pathsplit(f)), prefixes) def iflatten(obj): @@ -59,7 +113,7 @@ def iflatten(obj): for i in iflatten(value): yield i elif isinstance(obj, basestring): - yield path_no_prefix(obj) + yield path_no_prefix(os.path.join(*pathsplit(obj)), ALL_GYP_PREFIXES) def iflatten_gyp_file(gyp_file): @@ -80,27 +134,44 @@ def iflatten_gn_file(gn_file): for line in f.read().splitlines(): match = re.match(r'.*"([^"]*)".*', line) if match: - yield path_no_prefix(match.group(1)) + yield path_no_prefix( + os.path.join(*pathsplit(match.group(1))), ALL_GN_PREFIXES) -def icheck_values(values, *source_dirs): - for source_file in itertools.chain( - *[isources(source_dir) for source_dir in source_dirs] - ): +def 
icheck_values(values, prefixes): + for source_file in isources(prefixes): if source_file not in values: yield source_file -gyp_values = set(itertools.chain( - *[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES] - )) +def missing_gyp_files(): + gyp_values = set(itertools.chain( + *[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES] + )) + gyp_files = sorted(icheck_values(gyp_values, ALL_GYP_PREFIXES)) + return filter( + lambda x: not any(i in x for i in GYP_UNSUPPORTED_FEATURES), gyp_files) + + +def missing_gn_files(): + gn_values = set(itertools.chain( + *[iflatten_gn_file(gn_file) for gn_file in GN_FILES] + )) + + gn_files = sorted(icheck_values(gn_values, ALL_GN_PREFIXES)) + return filter( + lambda x: not any(i in x for i in GN_UNSUPPORTED_FEATURES), gn_files) + -print "----------- Files not in gyp: ------------" -for i in sorted(icheck_values(gyp_values, V8_SRC_BASE, V8_INCLUDE_BASE)): - print i +def main(): + print "----------- Files not in gyp: ------------" + for i in missing_gyp_files(): + print i -gn_values = set(iflatten_gn_file(os.path.join(V8_BASE, 'BUILD.gn'))) + print "\n----------- Files not in gn: -------------" + for i in missing_gn_files(): + print i + return 0 -print "\n----------- Files not in gn: -------------" -for i in sorted(icheck_values(gn_values, V8_SRC_BASE, V8_INCLUDE_BASE)): - print i +if '__main__' == __name__: + sys.exit(main()) diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt index d1395f5d9132ac..2229d87d53dd65 100644 --- a/deps/v8/tools/whitespace.txt +++ b/deps/v8/tools/whitespace.txt @@ -5,4 +5,5 @@ Try to write something funny. And please don't add trailing whitespace. A Smi balks into a war and says: "I'm so deoptimized today!" The doubles heard this and started to unbox. -The Smi looked at them when a crazy v8-autoroll account showed up.. +The Smi looked at them when a crazy v8-autoroll account showed up...... +The autoroller bought a round of Himbeerbrause. Suddenly ... 
diff --git a/lib/repl.js b/lib/repl.js index 1d678e67114c09..0d739ee2d602be 100644 --- a/lib/repl.js +++ b/lib/repl.js @@ -498,7 +498,19 @@ function REPLServer(prompt, } var evalCmd = self.bufferedCommand + cmd; - evalCmd = preprocess(evalCmd); + if (/^\s*\{/.test(evalCmd) && /\}\s*$/.test(evalCmd)) { + // It's confusing for `{ a : 1 }` to be interpreted as a block + // statement rather than an object literal. So, we first try + // to wrap it in parentheses, so that it will be interpreted as + // an expression. + evalCmd = '(' + evalCmd + ')\n'; + self.wrappedCmd = true; + } else { + // otherwise we just append a \n so that it will be either + // terminated, or continued onto the next expression if it's an + // unexpected end of input. + evalCmd = evalCmd + '\n'; + } debug('eval %j', evalCmd); self.eval(evalCmd, self.context, 'repl', finish); @@ -555,26 +567,6 @@ function REPLServer(prompt, // Display prompt again self.displayPrompt(); } - - function preprocess(code) { - let cmd = code; - if (/^\s*\{/.test(cmd) && /\}\s*$/.test(cmd)) { - // It's confusing for `{ a : 1 }` to be interpreted as a block - // statement rather than an object literal. So, we first try - // to wrap it in parentheses, so that it will be interpreted as - // an expression. - cmd = `(${cmd})`; - self.wrappedCmd = true; - } else { - // Mitigate https://github.com/nodejs/node/issues/548 - cmd = cmd.replace(/^\s*function\s+([^(]+)/, - (_, name) => `var ${name} = function ${name}`); - } - // Append a \n so that it will be either - // terminated, or continued onto the next expression if it's an - // unexpected end of input. 
- return `${cmd}\n`; - } }); self.on('SIGCONT', function() { diff --git a/node.gyp b/node.gyp index 0f263d1346f1b5..59a7f34258d3fd 100644 --- a/node.gyp +++ b/node.gyp @@ -258,8 +258,8 @@ }], [ 'node_use_bundled_v8=="true"', { 'dependencies': [ - 'deps/v8/tools/gyp/v8.gyp:v8', - 'deps/v8/tools/gyp/v8.gyp:v8_libplatform' + 'deps/v8/src/v8.gyp:v8', + 'deps/v8/src/v8.gyp:v8_libplatform' ], }], [ 'node_use_v8_platform=="true"', { @@ -462,7 +462,7 @@ 'defines': [ 'NODE_NO_BROWSER_GLOBALS' ], } ], [ 'node_use_bundled_v8=="true" and v8_postmortem_support=="true"', { - 'dependencies': [ 'deps/v8/tools/gyp/v8.gyp:postmortem-metadata' ], + 'dependencies': [ 'deps/v8/src/v8.gyp:postmortem-metadata' ], 'conditions': [ # -force_load is not applicable for the static library [ 'node_target_type!="static_library"', { @@ -869,13 +869,13 @@ }], [ 'node_use_v8_platform=="true"', { 'dependencies': [ - 'deps/v8/tools/gyp/v8.gyp:v8_libplatform', + 'deps/v8/src/v8.gyp:v8_libplatform', ], }], [ 'node_use_bundled_v8=="true"', { 'dependencies': [ - 'deps/v8/tools/gyp/v8.gyp:v8', - 'deps/v8/tools/gyp/v8.gyp:v8_libplatform' + 'deps/v8/src/v8.gyp:v8', + 'deps/v8/src/v8.gyp:v8_libplatform' ], }], ] diff --git a/src/util.h b/src/util.h index 08fb01e5ac89cf..8a47234fdf3d4a 100644 --- a/src/util.h +++ b/src/util.h @@ -11,16 +11,7 @@ #include #include -// OSX 10.9 defaults to libc++ which provides a C++11 header. 
-#if defined(__APPLE__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090 -#define USE_TR1_TYPE_TRAITS -#endif - -#ifdef USE_TR1_TYPE_TRAITS -#include // NOLINT(build/c++tr1) -#else #include // std::remove_reference -#endif namespace node { @@ -36,11 +27,7 @@ NO_RETURN void Abort(); NO_RETURN void Assert(const char* const (*args)[4]); void DumpBacktrace(FILE* fp); -#ifdef USE_TR1_TYPE_TRAITS -template using remove_reference = std::tr1::remove_reference; -#else template using remove_reference = std::remove_reference; -#endif #define FIXED_ONE_BYTE_STRING(isolate, string) \ (node::OneByteString((isolate), (string), sizeof(string) - 1)) diff --git a/test/known_issues/test-repl-function-redefinition-edge-case.js b/test/parallel/test-repl-function-redefinition-edge-case.js similarity index 95% rename from test/known_issues/test-repl-function-redefinition-edge-case.js rename to test/parallel/test-repl-function-redefinition-edge-case.js index 03b721fba7e7d5..1e3063e3db53ff 100644 --- a/test/known_issues/test-repl-function-redefinition-edge-case.js +++ b/test/parallel/test-repl-function-redefinition-edge-case.js @@ -13,7 +13,7 @@ r.input.emit('data', 'function a() { return 42; } (1)\n'); r.input.emit('data', 'a\n'); r.input.emit('data', '.exit'); -const expected = '1\n[Function a]\n'; +const expected = '1\n[Function: a]\n'; const got = r.output.accumulator.join(''); assert.strictEqual(got, expected); diff --git a/test/known_issues/test-vm-function-redefinition.js b/test/parallel/test-vm-function-redefinition.js similarity index 100% rename from test/known_issues/test-vm-function-redefinition.js rename to test/parallel/test-vm-function-redefinition.js diff --git a/tools/icu/icu-generic.gyp b/tools/icu/icu-generic.gyp index 9d466ac39227c3..ecbf6d18c24ee3 100644 --- a/tools/icu/icu-generic.gyp +++ b/tools/icu/icu-generic.gyp @@ -42,7 +42,6 @@ }], ], 'defines': [ - 'UCONFIG_NO_TRANSLITERATION=1', 'UCONFIG_NO_SERVICE=1', 'UCONFIG_NO_REGULAR_EXPRESSIONS=1', 
'U_ENABLE_DYLOAD=0', diff --git a/tools/icu/icu_small.json b/tools/icu/icu_small.json index de26e2cbb14b16..8791269e53b60e 100644 --- a/tools/icu/icu_small.json +++ b/tools/icu/icu_small.json @@ -25,7 +25,7 @@ "zone": "locales", "converters": "none", "stringprep": "locales", - "translit": "none", + "translit": "locales", "brkfiles": "none", "brkdict": "none", "confusables": "none",