diff --git a/.eslintrc.yaml b/.eslintrc.yaml
index feb944f40bf774..ba6334b0346985 100644
--- a/.eslintrc.yaml
+++ b/.eslintrc.yaml
@@ -103,6 +103,7 @@ rules:
no-multiple-empty-lines: [2, {max: 2, maxEOF: 0, maxBOF: 0}]
no-tabs: 2
no-trailing-spaces: 2
+ one-var-declaration-per-line: 2
operator-linebreak: [2, after]
quotes: [2, single, avoid-escape]
semi: 2
@@ -112,6 +113,7 @@ rules:
space-in-parens: [2, never]
space-infix-ops: 2
space-unary-ops: 2
+ unicode-bom: 2
# ECMAScript 6
# http://eslint.org/docs/rules/#ecmascript-6
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a61ac7d8cdcec3..50494140ae0c32 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,7 +28,8 @@ release.
- 7.6.0
+ 7.7.0
+ 7.6.0
7.5.0
7.4.0
7.3.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6965f78b4d3e21..bf3dbd0ad0e163 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,6 +85,9 @@ Create a branch and start hacking:
$ git checkout -b my-branch -t origin/master
```
+Any text you write should follow the [Style Guide](doc/STYLE_GUIDE.md),
+including comments and API documentation.
+
### Step 3: Commit
Make sure git knows your name and email address:
diff --git a/Makefile b/Makefile
index 2aac5c2f6a26e0..410f643c91bd59 100644
--- a/Makefile
+++ b/Makefile
@@ -10,6 +10,8 @@ TEST_CI_ARGS ?=
STAGINGSERVER ?= node-www
LOGLEVEL ?= silent
OSTYPE := $(shell uname -s | tr '[A-Z]' '[a-z]')
+COVTESTS ?= test
+GTEST_FILTER ?= "*"
ifdef JOBS
PARALLEL_ARGS = -j $(JOBS)
@@ -113,8 +115,77 @@ distclean:
check: test
+# Remove files generated by running coverage; put the non-instrumented lib
+# back in place.
+coverage-clean:
+ if [ -d lib_ ]; then rm -rf lib; mv lib_ lib; fi
+ -rm -rf node_modules
+ -rm -rf gcovr testing
+ -rm -rf out/$(BUILDTYPE)/.coverage
+ -rm -rf .cov_tmp coverage
+ -rm -f out/$(BUILDTYPE)/obj.target/node/src/*.gcda
+ -rm -f out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcda
+ -rm -f out/$(BUILDTYPE)/obj.target/node/src/*.gcno
+ -rm -f out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcno
+
+# Build and test with code coverage reporting. Leave the lib directory
+# instrumented for any additional runs the user may want to make.
+# For C++ coverage reporting, this needs to be run in conjunction with
+# ./configure --coverage. HTML coverage reports will be created under
+# coverage/.
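+#
+# A typical invocation (a sketch; both steps are needed for C++ coverage):
+#   ./configure --coverage
+#   make coverage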
+
+coverage: coverage-test
+
+coverage-build: all
+ mkdir -p node_modules
+ if [ ! -d node_modules/istanbul-merge ]; then \
+ $(NODE) ./deps/npm install istanbul-merge; fi
+ if [ ! -d node_modules/nyc ]; then $(NODE) ./deps/npm install nyc; fi
+ if [ ! -d gcovr ]; then git clone --depth=1 \
+ --single-branch git://github.com/gcovr/gcovr.git; fi
+ if [ ! -d testing ]; then git clone --depth=1 \
+ --single-branch https://github.com/nodejs/testing.git; fi
+ if [ ! -f gcovr/scripts/gcovr.orig ]; then \
+ (cd gcovr && patch -N -p1 < \
+ "$(CURDIR)/testing/coverage/gcovr-patches.diff"); fi
+ if [ -d lib_ ]; then rm -rf lib; mv lib_ lib; fi
+ mv lib lib_
+ $(NODE) ./node_modules/.bin/nyc instrument lib_/ lib/
+ $(MAKE)
+
+coverage-test: coverage-build
+ -rm -rf out/$(BUILDTYPE)/.coverage
+ -rm -rf .cov_tmp
+ -rm -f out/$(BUILDTYPE)/obj.target/node/src/*.gcda
+ -rm -f out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcda
+ -$(MAKE) $(COVTESTS)
+ mv lib lib__
+ mv lib_ lib
+ mkdir -p coverage .cov_tmp
+ $(NODE) ./node_modules/.bin/istanbul-merge --out \
+ .cov_tmp/libcov.json 'out/Release/.coverage/coverage-*.json'
+ (cd lib && .$(NODE) ../node_modules/.bin/nyc report \
+ --temp-directory "$(CURDIR)/.cov_tmp" -r html \
+ --report-dir "../coverage")
+ -(cd out && "../gcovr/scripts/gcovr" --gcov-exclude='.*deps' \
+ --gcov-exclude='.*usr' -v -r Release/obj.target/node \
+ --html --html-detail -o ../coverage/cxxcoverage.html)
+ mv lib lib_
+ mv lib__ lib
+ @echo -n "Javascript coverage %: "
+ @grep -B1 Lines coverage/index.html | head -n1 \
+ | sed 's/<[^>]*>//g'| sed 's/ //g'
+ @echo -n "C++ coverage %: "
+ @grep -A3 Lines coverage/cxxcoverage.html | grep style \
+ | sed 's/<[^>]*>//g'| sed 's/ //g'
+
cctest: all
- @out/$(BUILDTYPE)/$@
+ @out/$(BUILDTYPE)/$@ --gtest_filter=$(GTEST_FILTER)
+
+list-gtests:
+ifeq (,$(wildcard out/$(BUILDTYPE)/cctest))
+ $(error Please run 'make cctest' first)
+endif
+ @out/$(BUILDTYPE)/cctest --gtest_list_tests
v8:
tools/make-v8.sh
@@ -133,8 +204,10 @@ test-parallel: all
test-valgrind: all
$(PYTHON) tools/test.py --mode=release --valgrind sequential parallel message
-test/gc/build/Release/binding.node: \
- $(NODE_EXE) test/gc/binding.cc test/gc/binding.gyp
+# Implicitly depends on $(NODE_EXE). We don't depend on it explicitly because
+# it always triggers a rebuild due to it being a .PHONY rule. See the comment
+# near the build-addons rule for more background.
+test/gc/build/Release/binding.node: test/gc/binding.cc test/gc/binding.gyp
$(NODE) deps/npm/node_modules/node-gyp/bin/node-gyp rebuild \
--python="$(PYTHON)" \
--directory="$(shell pwd)/test/gc" \
@@ -172,7 +245,7 @@ test/addons/.buildstamp: config.gypi \
# Cannot use $(wildcard test/addons/*/) here, it's evaluated before
# embedded addons have been generated from the documentation.
@for dirname in test/addons/*/; do \
- echo "\nBuilding addon $$PWD/$$dirname" ; \
+ printf "\nBuilding addon $$PWD/$$dirname\n" ; \
env MAKEFLAGS="-j1" $(NODE) deps/npm/node_modules/node-gyp/bin/node-gyp \
--loglevel=$(LOGLEVEL) rebuild \
--python="$(PYTHON)" \
@@ -224,6 +297,11 @@ test-ci-js: | clear-stalled
$(PYTHON) tools/test.py $(PARALLEL_ARGS) -p tap --logfile test.tap \
--mode=release --flaky-tests=$(FLAKY_TESTS) \
$(TEST_CI_ARGS) $(CI_JS_SUITES)
+ # Clean up any leftover processes
+ PS_OUT=`ps awwx | grep Release/node | grep -v grep | awk '{print $$1}'`; \
+ if [ "$${PS_OUT}" ]; then \
+ echo $${PS_OUT} | $(XARGS) kill; exit 1; \
+ fi
test-ci: LOGLEVEL := info
test-ci: | clear-stalled build-addons
@@ -231,6 +309,11 @@ test-ci: | clear-stalled build-addons
$(PYTHON) tools/test.py $(PARALLEL_ARGS) -p tap --logfile test.tap \
--mode=release --flaky-tests=$(FLAKY_TESTS) \
$(TEST_CI_ARGS) $(CI_NATIVE_SUITES) $(CI_JS_SUITES)
+ # Clean up any leftover processes
+ PS_OUT=`ps awwx | grep Release/node | grep -v grep | awk '{print $$1}'`; \
+ if [ "$${PS_OUT}" ]; then \
+ echo $${PS_OUT} | $(XARGS) kill; exit 1; \
+ fi
test-release: test-build
$(PYTHON) tools/test.py --mode=release
@@ -275,6 +358,11 @@ test-npm-publish: $(NODE_EXE)
test-addons: test-build
$(PYTHON) tools/test.py --mode=release addons
+test-addons-clean:
+ $(RM) -rf test/addons/??_*/
+ $(RM) -rf test/addons/*/build
+ $(RM) test/addons/.buildstamp test/addons/.docbuildstamp
+
test-timers:
$(MAKE) --directory=tools faketime
$(PYTHON) tools/test.py --mode=release timers
@@ -774,9 +862,11 @@ endif
.PHONY: lint cpplint jslint bench clean docopen docclean doc dist distclean \
check uninstall install install-includes install-bin all staticlib \
- dynamiclib test test-all test-addons build-addons website-upload pkg \
- blog blogclean tar binary release-only bench-http-simple bench-idle \
- bench-all bench bench-misc bench-array bench-buffer bench-net \
- bench-http bench-fs bench-tls cctest run-ci test-v8 test-v8-intl \
- test-v8-benchmarks test-v8-all v8 lint-ci bench-ci jslint-ci doc-only \
- $(TARBALL)-headers test-ci test-ci-native test-ci-js build-ci clear-stalled
+ dynamiclib test test-all test-addons test-addons-clean build-addons \
+ website-upload pkg blog blogclean tar binary release-only \
+ bench-http-simple bench-idle bench-all bench bench-misc bench-array \
+ bench-buffer bench-net bench-http bench-fs bench-tls cctest run-ci test-v8 \
+ test-v8-intl test-v8-benchmarks test-v8-all v8 lint-ci bench-ci jslint-ci \
+ doc-only $(TARBALL)-headers test-ci test-ci-native test-ci-js build-ci \
+ clear-stalled coverage-clean coverage-build coverage-test coverage \
+ list-gtests
diff --git a/benchmark/README.md b/benchmark/README.md
index d1233470757f20..6fd9a97bdfb3bb 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -1,417 +1,246 @@
-# Node.js core benchmark
-
-This folder contains benchmarks to measure the performance of the Node.js APIs.
-
-## Table of Content
-
-* [Prerequisites](#prerequisites)
-* [Running benchmarks](#running-benchmarks)
- * [Running individual benchmarks](#running-individual-benchmarks)
- * [Running all benchmarks](#running-all-benchmarks)
- * [Comparing node versions](#comparing-node-versions)
- * [Comparing parameters](#comparing-parameters)
-* [Creating a benchmark](#creating-a-benchmark)
-
-## Prerequisites
-
-Most of the HTTP benchmarks require a benchmarker to be installed, this can be
-either [`wrk`][wrk] or [`autocannon`][autocannon].
-
-`Autocannon` is a Node script that can be installed using
-`npm install -g autocannon`. It will use the Node executable that is in the
-path, hence if you want to compare two HTTP benchmark runs make sure that the
-Node version in the path is not altered.
-
-`wrk` may be available through your preferred package manager. If not, you can
-easily build it [from source][wrk] via `make`.
-
-By default `wrk` will be used as benchmarker. If it is not available
-`autocannon` will be used in it its place. When creating a HTTP benchmark you
-can specify which benchmarker should be used. You can force a specific
-benchmarker to be used by providing it as an argument, e. g.:
-
-`node benchmark/run.js --set benchmarker=autocannon http`
-
-`node benchmark/http/simple.js benchmarker=autocannon`
-
-Basic Unix tools are required for some benchmarks.
-[Git for Windows][git-for-windows] includes Git Bash and the necessary tools,
-which need to be included in the global Windows `PATH`.
-
-To analyze the results `R` should be installed. Check you package manager or
-download it from https://www.r-project.org/.
-
-The R packages `ggplot2` and `plyr` are also used and can be installed using
-the R REPL.
-
-```R
-$ R
-install.packages("ggplot2")
-install.packages("plyr")
-```
-
-### CRAN Mirror Issues
-In the event you get a message that you need to select a CRAN mirror first.
-
-You can specify a mirror by adding in the repo parameter.
-
-If we used the "http://cran.us.r-project.org" mirror, it could look something
-like this:
-
-```R
-install.packages("ggplot2", repo="http://cran.us.r-project.org")
-```
-
-Of course, use the mirror that suits your location.
-A list of mirrors is [located here](https://cran.r-project.org/mirrors.html).
-
-## Running benchmarks
-
-### Running individual benchmarks
-
-This can be useful for debugging a benchmark or doing a quick performance
-measure. But it does not provide the statistical information to make any
-conclusions about the performance.
-
-Individual benchmarks can be executed by simply executing the benchmark script
-with node.
-
-```console
-$ node benchmark/buffers/buffer-tostring.js
-
-buffers/buffer-tostring.js n=10000000 len=0 arg=true: 62710590.393305704
-buffers/buffer-tostring.js n=10000000 len=1 arg=true: 9178624.591787899
-buffers/buffer-tostring.js n=10000000 len=64 arg=true: 7658962.8891432695
-buffers/buffer-tostring.js n=10000000 len=1024 arg=true: 4136904.4060201733
-buffers/buffer-tostring.js n=10000000 len=0 arg=false: 22974354.231509723
-buffers/buffer-tostring.js n=10000000 len=1 arg=false: 11485945.656765845
-buffers/buffer-tostring.js n=10000000 len=64 arg=false: 8718280.70650129
-buffers/buffer-tostring.js n=10000000 len=1024 arg=false: 4103857.0726124765
-```
-
-Each line represents a single benchmark with parameters specified as
-`${variable}=${value}`. Each configuration combination is executed in a separate
-process. This ensures that benchmark results aren't affected by the execution
-order due to v8 optimizations. **The last number is the rate of operations
-measured in ops/sec (higher is better).**
-
-Furthermore you can specify a subset of the configurations, by setting them in
-the process arguments:
-
-```console
-$ node benchmark/buffers/buffer-tostring.js len=1024
-
-buffers/buffer-tostring.js n=10000000 len=1024 arg=true: 3498295.68561504
-buffers/buffer-tostring.js n=10000000 len=1024 arg=false: 3783071.1678948295
-```
-
-### Running all benchmarks
-
-Similar to running individual benchmarks, a group of benchmarks can be executed
-by using the `run.js` tool. Again this does not provide the statistical
-information to make any conclusions.
-
-```console
-$ node benchmark/run.js arrays
-
-arrays/var-int.js
-arrays/var-int.js n=25 type=Array: 71.90148040747789
-arrays/var-int.js n=25 type=Buffer: 92.89648382795582
-...
-
-arrays/zero-float.js
-arrays/zero-float.js n=25 type=Array: 75.46208316171496
-arrays/zero-float.js n=25 type=Buffer: 101.62785630273159
-...
-
-arrays/zero-int.js
-arrays/zero-int.js n=25 type=Array: 72.31023859816062
-arrays/zero-int.js n=25 type=Buffer: 90.49906662339653
-...
-```
-
-It is possible to execute more groups by adding extra process arguments.
-```console
-$ node benchmark/run.js arrays buffers
-```
-
-### Comparing node versions
-
-To compare the effect of a new node version use the `compare.js` tool. This
-will run each benchmark multiple times, making it possible to calculate
-statistics on the performance measures.
-
-As an example on how to check for a possible performance improvement, the
-[#5134](https://github.com/nodejs/node/pull/5134) pull request will be used as
-an example. This pull request _claims_ to improve the performance of the
-`string_decoder` module.
-
-First build two versions of node, one from the master branch (here called
-`./node-master`) and another with the pull request applied (here called
-`./node-pr-5135`).
-
-The `compare.js` tool will then produce a csv file with the benchmark results.
-
-```console
-$ node benchmark/compare.js --old ./node-master --new ./node-pr-5134 string_decoder > compare-pr-5134.csv
-```
-
-For analysing the benchmark results use the `compare.R` tool.
-
-```console
-$ cat compare-pr-5134.csv | Rscript benchmark/compare.R
-
- improvement confidence p.value
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=ascii 12.46 % *** 1.165345e-04
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=base64-ascii 24.70 % *** 1.820615e-15
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=base64-utf8 23.60 % *** 2.105625e-12
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=utf8 14.04 % *** 1.291105e-07
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=128 encoding=ascii 6.70 % * 2.928003e-02
-...
-```
-
-In the output, _improvement_ is the relative improvement of the new version,
-hopefully this is positive. _confidence_ tells if there is enough
-statistical evidence to validate the _improvement_. If there is enough evidence
-then there will be at least one star (`*`), more stars is just better. **However
-if there are no stars, then you shouldn't make any conclusions based on the
-_improvement_.** Sometimes this is fine, for example if you are expecting there
-to be no improvements, then there shouldn't be any stars.
-
-**A word of caution:** Statistics is not a foolproof tool. If a benchmark shows
-a statistical significant difference, there is a 5% risk that this
-difference doesn't actually exist. For a single benchmark this is not an
-issue. But when considering 20 benchmarks it's normal that one of them
-will show significance, when it shouldn't. A possible solution is to instead
-consider at least two stars (`**`) as the threshold, in that case the risk
-is 1%. If three stars (`***`) is considered the risk is 0.1%. However this
-may require more runs to obtain (can be set with `--runs`).
-
-_For the statistically minded, the R script performs an [independent/unpaired
-2-group t-test][t-test], with the null hypothesis that the performance is the
-same for both versions. The confidence field will show a star if the p-value
-is less than `0.05`._
-
-The `compare.R` tool can also produce a box plot by using the `--plot filename`
-option. In this case there are 48 different benchmark combinations, thus you
-may want to filter the csv file. This can be done while benchmarking using the
-`--set` parameter (e.g. `--set encoding=ascii`) or by filtering results
-afterwards using tools such as `sed` or `grep`. In the `sed` case be sure to
-keep the first line since that contains the header information.
-
-```console
-$ cat compare-pr-5134.csv | sed '1p;/encoding=ascii/!d' | Rscript benchmark/compare.R --plot compare-plot.png
-
- improvement confidence p.value
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=ascii 12.46 % *** 1.165345e-04
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=128 encoding=ascii 6.70 % * 2.928003e-02
-string_decoder/string-decoder.js n=250000 chunk=1024 inlen=32 encoding=ascii 7.47 % *** 5.780583e-04
-string_decoder/string-decoder.js n=250000 chunk=16 inlen=1024 encoding=ascii 8.94 % *** 1.788579e-04
-string_decoder/string-decoder.js n=250000 chunk=16 inlen=128 encoding=ascii 10.54 % *** 4.016172e-05
-...
-```
-
-
-
-### Comparing parameters
-
-It can be useful to compare the performance for different parameters, for
-example to analyze the time complexity.
-
-To do this use the `scatter.js` tool, this will run a benchmark multiple times
-and generate a csv with the results.
-
-```console
-$ node benchmark/scatter.js benchmark/string_decoder/string-decoder.js > scatter.csv
-```
-
-After generating the csv, a comparison table can be created using the
-`scatter.R` tool. Even more useful it creates an actual scatter plot when using
-the `--plot filename` option.
-
-```console
-$ cat scatter.csv | Rscript benchmark/scatter.R --xaxis chunk --category encoding --plot scatter-plot.png --log
-
-aggregating variable: inlen
-
-chunk encoding mean confidence.interval
- 16 ascii 1111933.3 221502.48
- 16 base64-ascii 167508.4 33116.09
- 16 base64-utf8 122666.6 25037.65
- 16 utf8 783254.8 159601.79
- 64 ascii 2623462.9 399791.36
- 64 base64-ascii 462008.3 85369.45
- 64 base64-utf8 420108.4 85612.05
- 64 utf8 1358327.5 235152.03
- 256 ascii 3730343.4 371530.47
- 256 base64-ascii 663281.2 80302.73
- 256 base64-utf8 632911.7 81393.07
- 256 utf8 1554216.9 236066.53
- 1024 ascii 4399282.0 186436.46
- 1024 base64-ascii 730426.6 63806.12
- 1024 base64-utf8 680954.3 68076.33
- 1024 utf8 1554832.5 237532.07
-```
-
-Because the scatter plot can only show two variables (in this case _chunk_ and
-_encoding_) the rest is aggregated. Sometimes aggregating is a problem, this
-can be solved by filtering. This can be done while benchmarking using the
-`--set` parameter (e.g. `--set encoding=ascii`) or by filtering results
-afterwards using tools such as `sed` or `grep`. In the `sed` case be
-sure to keep the first line since that contains the header information.
-
-```console
-$ cat scatter.csv | sed -E '1p;/([^,]+, ){3}128,/!d' | Rscript benchmark/scatter.R --xaxis chunk --category encoding --plot scatter-plot.png --log
-
-chunk encoding mean confidence.interval
- 16 ascii 701285.96 21233.982
- 16 base64-ascii 107719.07 3339.439
- 16 base64-utf8 72966.95 2438.448
- 16 utf8 475340.84 17685.450
- 64 ascii 2554105.08 87067.132
- 64 base64-ascii 330120.32 8551.707
- 64 base64-utf8 249693.19 8990.493
- 64 utf8 1128671.90 48433.862
- 256 ascii 4841070.04 181620.768
- 256 base64-ascii 849545.53 29931.656
- 256 base64-utf8 809629.89 33773.496
- 256 utf8 1489525.15 49616.334
- 1024 ascii 4931512.12 165402.805
- 1024 base64-ascii 863933.22 27766.982
- 1024 base64-utf8 827093.97 24376.522
- 1024 utf8 1487176.43 50128.721
-```
-
-
-
-## Creating a benchmark
-
-All benchmarks use the `require('../common.js')` module. This contains the
-`createBenchmark(main, configs[, options])` method which will setup your
-benchmark.
-
-The arguments of `createBenchmark` are:
-
-* `main` {Function} The benchmark function,
- where the code running operations and controlling timers should go
-* `configs` {Object} The benchmark parameters. `createBenchmark` will run all
- possible combinations of these parameters, unless specified otherwise.
- Each configuration is a property with an array of possible values.
- Note that the configuration values can only be strings or numbers.
-* `options` {Object} The benchmark options. At the moment only the `flags`
- option for specifying command line flags is supported.
-
-`createBenchmark` returns a `bench` object, which is used for timing
-the runtime of the benchmark. Run `bench.start()` after the initialization
-and `bench.end(n)` when the benchmark is done. `n` is the number of operations
-you performed in the benchmark.
-
-The benchmark script will be run twice:
-
-The first pass will configure the benchmark with the combination of
-parameters specified in `configs`, and WILL NOT run the `main` function.
-In this pass, no flags except the ones directly passed via commands
-that you run the benchmarks with will be used.
-
-In the second pass, the `main` function will be run, and the process
-will be launched with:
-
-* The flags you've passed into `createBenchmark` (the third argument)
-* The flags in the command that you run this benchmark with
-
-Beware that any code outside the `main` function will be run twice
-in different processes. This could be troublesome if the code
-outside the `main` function has side effects. In general, prefer putting
-the code inside the `main` function if it's more than just declaration.
-
-```js
-'use strict';
-const common = require('../common.js');
-const SlowBuffer = require('buffer').SlowBuffer;
-
-const configs = {
- // Number of operations, specified here so they show up in the report.
- // Most benchmarks just use one value for all runs.
- n: [1024],
- type: ['fast', 'slow'], // Custom configurations
- size: [16, 128, 1024] // Custom configurations
-};
-
-const options = {
- // Add --expose-internals if you want to require internal modules in main
- flags: ['--zero-fill-buffers']
-};
-
-// main and configs are required, options is optional.
-const bench = common.createBenchmark(main, configs, options);
-
-// Note that any code outside main will be run twice,
-// in different processes, with different command line arguments.
-
-function main(conf) {
- // You will only get the flags that you have passed to createBenchmark
- // earlier when main is run. If you want to benchmark the internal modules,
- // require them here. For example:
- // const URL = require('internal/url').URL
-
- // Start the timer
- bench.start();
-
- // Do operations here
- const BufferConstructor = conf.type === 'fast' ? Buffer : SlowBuffer;
-
- for (let i = 0; i < conf.n; i++) {
- new BufferConstructor(conf.size);
- }
-
- // End the timer, pass in the number of operations
- bench.end(conf.n);
-}
-```
-
-## Creating HTTP benchmark
-
-The `bench` object returned by `createBenchmark` implements
-`http(options, callback)` method. It can be used to run external tool to
-benchmark HTTP servers.
-
-```js
-'use strict';
-
-const common = require('../common.js');
-
-const bench = common.createBenchmark(main, {
- kb: [64, 128, 256, 1024],
- connections: [100, 500]
-});
-
-function main(conf) {
- const http = require('http');
- const len = conf.kb * 1024;
- const chunk = Buffer.alloc(len, 'x');
- const server = http.createServer(function(req, res) {
- res.end(chunk);
- });
-
- server.listen(common.PORT, function() {
- bench.http({
- connections: conf.connections,
- }, function() {
- server.close();
- });
- });
-}
-```
-
-Supported options keys are:
-* `port` - defaults to `common.PORT`
-* `path` - defaults to `/`
-* `connections` - number of concurrent connections to use, defaults to 100
-* `duration` - duration of the benchmark in seconds, defaults to 10
-* `benchmarker` - benchmarker to use, defaults to
-`common.default_http_benchmarker`
-
-[autocannon]: https://github.com/mcollina/autocannon
-[wrk]: https://github.com/wg/wrk
-[t-test]: https://en.wikipedia.org/wiki/Student%27s_t-test#Equal_or_unequal_sample_sizes.2C_unequal_variances
-[git-for-windows]: http://git-scm.com/download/win
+# Node.js Core Benchmarks
+
+This folder contains code and data used to measure performance
+of different Node.js implementations and different ways of
+writing JavaScript run by the built-in JavaScript engine.
+
+For a detailed guide on how to write and run benchmarks in this
+directory, see [the guide on benchmarks](../doc/guides/writing-and-running-benchmarks.md).
+
+## Table of Contents
+
+* [Benchmark directories](#benchmark-directories)
+* [Common API](#common-api)
+
+## Benchmark Directories
+
+| Directory | Purpose |
+|-----------|---------|
+| arrays | Benchmarks for various operations on array-like objects, including `Array`, `Buffer`, and typed arrays. |
+| assert | Benchmarks for the `assert` subsystem. |
+| buffers | Benchmarks for the `buffer` subsystem. |
+| child_process | Benchmarks for the `child_process` subsystem. |
+| crypto | Benchmarks for the `crypto` subsystem. |
+| dgram | Benchmarks for the `dgram` subsystem. |
+| domain | Benchmarks for the `domain` subsystem. |
+| es | Benchmarks for various new ECMAScript features and their pre-ES2015 counterparts. |
+| events | Benchmarks for the `events` subsystem. |
+| fixtures | Benchmark fixtures used in various benchmarks throughout the benchmark suite. |
+| fs | Benchmarks for the `fs` subsystem. |
+| http | Benchmarks for the `http` subsystem. |
+| misc | Miscellaneous benchmarks and benchmarks for shared internal modules. |
+| module | Benchmarks for the `module` subsystem. |
+| net | Benchmarks for the `net` subsystem. |
+| path | Benchmarks for the `path` subsystem. |
+| process | Benchmarks for the `process` subsystem. |
+| querystring | Benchmarks for the `querystring` subsystem. |
+| streams | Benchmarks for the `streams` subsystem. |
+| string_decoder | Benchmarks for the `string_decoder` subsystem. |
+| timers | Benchmarks for the `timers` subsystem, including `setTimeout`, `setInterval`, etc. |
+| tls | Benchmarks for the `tls` subsystem. |
+| url | Benchmarks for the `url` subsystem, including the legacy `url` implementation and the WHATWG URL implementation. |
+| util | Benchmarks for the `util` subsystem. |
+| vm | Benchmarks for the `vm` subsystem. |
+
+### Other Top-level files
+
+The top-level files include common dependencies of the benchmarks
+and the tools for launching benchmarks and visualizing their output.
+The actual benchmark scripts should be placed in their corresponding
+directories.
+
+* `_benchmark_progress.js`: implements the progress bar displayed
+ when running `compare.js`
+* `_cli.js`: parses the command line arguments passed to `compare.js`,
+ `run.js` and `scatter.js`
+* `_cli.R`: parses the command line arguments passed to `compare.R`
+* `_http-benchmarkers.js`: selects and runs external tools for benchmarking
+ the `http` subsystem.
+* `common.js`: see [Common API](#common-api).
+* `compare.js`: command line tool for comparing performance between different
+ Node.js binaries.
+* `compare.R`: R script for statistically analyzing the output of
+ `compare.js`
+* `run.js`: command line tool for running individual benchmark suite(s).
+* `scatter.js`: command line tool for comparing the performance
+ between different parameters in benchmark configurations,
+ for example to analyze the time complexity.
+* `scatter.R`: R script for visualizing the output of `scatter.js` with
+ scatter plots.
+
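+For example, a typical workflow with these tools (see the guide for details;
+`./node-old` and `./node-new` below are placeholder binary names) might look
+like:
+
+```console
+$ node benchmark/run.js arrays
+$ node benchmark/compare.js --old ./node-old --new ./node-new string_decoder > compare.csv
+$ cat compare.csv | Rscript benchmark/compare.R
+$ node benchmark/scatter.js benchmark/string_decoder/string-decoder.js > scatter.csv
+```
+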
+## Common API
+
+The common.js module is used by benchmarks for consistency across repeated
+tasks. It has a number of helpful functions and properties to help with
+writing benchmarks.
+
+### createBenchmark(fn, configs[, options])
+
+See [the guide on writing benchmarks](../doc/guides/writing-and-running-benchmarks.md#basics-of-a-benchmark).
+
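+A minimal sketch of a benchmark that uses it, adapted from the
+`process/memoryUsage.js` benchmark in this directory (it assumes the file
+lives in a subdirectory of `benchmark/`):
+
+```js
+'use strict';
+const common = require('../common.js');
+
+// Each combination of configuration values runs in a separate process.
+const bench = common.createBenchmark(main, {
+  n: [1e5]
+});
+
+function main(conf) {
+  const n = +conf.n;
+  bench.start();
+  for (var i = 0; i < n; i++) {
+    // The operation being measured goes here.
+    process.memoryUsage();
+  }
+  // Report the number of operations performed.
+  bench.end(n);
+}
+```
+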
+### default\_http\_benchmarker
+
+The default benchmarker used to run HTTP benchmarks.
+See [the guide on writing HTTP benchmarks](../doc/guides/writing-and-running-benchmarks.md#creating-an-http-benchmark).
+
+### PORT
+
+The default port used to run HTTP benchmarks.
+See [the guide on writing HTTP benchmarks](../doc/guides/writing-and-running-benchmarks.md#creating-an-http-benchmark).
+
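+A short sketch of an HTTP benchmark that uses both, adapted from the example
+that previously lived in this file (see the guide for a complete treatment):
+
+```js
+'use strict';
+const common = require('../common.js');
+const http = require('http');
+
+const bench = common.createBenchmark(main, {
+  connections: [100, 500]
+});
+
+function main(conf) {
+  const server = http.createServer(function(req, res) {
+    res.end('hello world');
+  });
+
+  server.listen(common.PORT, function() {
+    // Runs the default HTTP benchmarker against the server, then closes it.
+    bench.http({
+      connections: conf.connections,
+    }, function() {
+      server.close();
+    });
+  });
+}
+```
+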
+### sendResult(data)
+
+Used in special benchmarks that can't use `createBenchmark` and the object
+it returns to accomplish what they need. This function reports timing
+data to the parent process (usually created by running `compare.js`, `run.js` or
+`scatter.js`).
+
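+A hypothetical sketch: the exact shape of `data` is defined by `common.js`
+and is not documented here, so the fields below are assumptions for
+illustration only:
+
+```js
+'use strict';
+const common = require('../common.js');
+
+// Field names here are assumptions, not a documented schema.
+common.sendResult({
+  name: 'misc/custom-timing.js',  // benchmark name
+  conf: { n: 1e5 },               // configuration for this run
+  rate: 123456.78,                // operations per second
+  time: 0.81                      // elapsed time in seconds
+});
+```
+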
+### v8ForceOptimization(method[, ...args])
+
+Force V8 to mark the `method` for optimization with the native function
+`%OptimizeFunctionOnNextCall()` and return the optimization status
+after that.
+
+It can be used to prevent the benchmark from getting disrupted by the optimizer
+kicking in halfway through. However, this could result in a less effective
+optimization. In general, only use it if you know what it actually does.
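+
+A sketch of typical usage, modeled on how the URL benchmarks in this
+directory warm up the function under test (the sample input is arbitrary):
+
+```js
+'use strict';
+const common = require('../common.js');
+const url = require('url');
+
+// Force-optimize url.parse() so the timed loop is not disrupted by the
+// optimizer kicking in halfway through.
+common.v8ForceOptimization(url.parse, 'https://nodejs.org/en/blog/');
+```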
diff --git a/benchmark/_benchmark_progress.js b/benchmark/_benchmark_progress.js
index 2a2a458c5c4e4b..4b42248f246858 100644
--- a/benchmark/_benchmark_progress.js
+++ b/benchmark/_benchmark_progress.js
@@ -15,9 +15,9 @@ function fraction(numerator, denominator) {
function getTime(diff) {
const time = Math.ceil(diff[0] + diff[1] / 1e9);
- const seconds = pad(time % 60, 2, '0');
- const minutes = pad(Math.floor(time / 60) % (60 * 60), 2, '0');
- const hours = pad(Math.floor(time / (60 * 60)), 2, '0');
+ const hours = pad(Math.floor(time / 3600), 2, '0');
+ const minutes = pad(Math.floor((time % 3600) / 60), 2, '0');
+ const seconds = pad((time % 3600) % 60, 2, '0');
return `${hours}:${minutes}:${seconds}`;
}
diff --git a/benchmark/dgram/bind-params.js b/benchmark/dgram/bind-params.js
new file mode 100644
index 00000000000000..92e9b7f85b1e12
--- /dev/null
+++ b/benchmark/dgram/bind-params.js
@@ -0,0 +1,38 @@
+'use strict';
+
+const common = require('../common.js');
+const dgram = require('dgram');
+
+const configs = {
+ n: [1e4],
+ port: ['true', 'false'],
+ address: ['true', 'false'],
+};
+
+const bench = common.createBenchmark(main, configs);
+
+function main(conf) {
+ const n = +conf.n;
+ const port = conf.port === 'true' ? 0 : undefined;
+ const address = conf.address === 'true' ? '0.0.0.0' : undefined;
+
+ if (port !== undefined && address !== undefined) {
+ bench.start();
+ for (let i = 0; i < n; i++) {
+ dgram.createSocket('udp4').bind(port, address).unref();
+ }
+ bench.end(n);
+ } else if (port !== undefined) {
+ bench.start();
+ for (let i = 0; i < n; i++) {
+ dgram.createSocket('udp4').bind(port).unref();
+ }
+ bench.end(n);
+ } else if (port === undefined && address === undefined) {
+ bench.start();
+ for (let i = 0; i < n; i++) {
+ dgram.createSocket('udp4').bind().unref();
+ }
+ bench.end(n);
+ }
+}
diff --git a/benchmark/domain/domain-fn-args.js b/benchmark/domain/domain-fn-args.js
index e9b24811c81689..20f452d67a6f9a 100644
--- a/benchmark/domain/domain-fn-args.js
+++ b/benchmark/domain/domain-fn-args.js
@@ -12,14 +12,14 @@ var gargs = [1, 2, 3];
function main(conf) {
- var args, n = +conf.n;
+ var n = +conf.n;
var myArguments = gargs.slice(0, conf.arguments);
bench.start();
bdomain.enter();
for (var i = 0; i < n; i++) {
if (myArguments.length >= 2) {
- args = Array.prototype.slice.call(myArguments, 1);
+ const args = Array.prototype.slice.call(myArguments, 1);
fn.apply(this, args);
} else {
fn.call(this);
diff --git a/benchmark/es/destructuring-bench.js b/benchmark/es/destructuring-bench.js
index 0e9b5e93f3b318..3288e009a08515 100644
--- a/benchmark/es/destructuring-bench.js
+++ b/benchmark/es/destructuring-bench.js
@@ -9,9 +9,9 @@ const bench = common.createBenchmark(main, {
});
function runSwapManual(n) {
- var i = 0, x, y, r;
+ var x, y, r;
bench.start();
- for (; i < n; i++) {
+ for (var i = 0; i < n; i++) {
x = 1, y = 2;
r = x;
x = y;
@@ -23,9 +23,9 @@ function runSwapManual(n) {
}
function runSwapDestructured(n) {
- var i = 0, x, y;
+ var x, y;
bench.start();
- for (; i < n; i++) {
+ for (var i = 0; i < n; i++) {
x = 1, y = 2;
[x, y] = [y, x];
assert.strictEqual(x, 2);
diff --git a/benchmark/fixtures/url-inputs.js b/benchmark/fixtures/url-inputs.js
new file mode 100644
index 00000000000000..7b1983f6faa590
--- /dev/null
+++ b/benchmark/fixtures/url-inputs.js
@@ -0,0 +1,30 @@
+'use strict';
+
+exports.urls = {
+ long: 'http://nodejs.org:89/docs/latest/api/foo/bar/qua/13949281/0f28b/' +
+ '/5d49/b3020/url.html#test?payload1=true&payload2=false&test=1' +
+ '&benchmark=3&foo=38.38.011.293&bar=1234834910480&test=19299&3992&' +
+ 'key=f5c65e1e98fe07e648249ad41e1cfdb0',
+ short: 'https://nodejs.org/en/blog/',
+ idn: 'http://你好你好.在线',
+ auth: 'https://user:pass@example.com/path?search=1',
+ file: 'file:///foo/bar/test/node.js',
+ ws: 'ws://localhost:9229/f46db715-70df-43ad-a359-7f9949f39868',
+ javascript: 'javascript:alert("node is awesome");',
+ percent: 'https://%E4%BD%A0/foo',
+ dot: 'https://example.org/./a/../b/./c'
+};
+
+exports.searchParams = {
+ noencode: 'foo=bar&baz=quux&xyzzy=thud',
+ multicharsep: 'foo=bar&&&&&&&&&&baz=quux&&&&&&&&&&xyzzy=thud',
+ encodefake: 'foo=%©ar&baz=%A©uux&xyzzy=%©ud',
+ encodemany: '%66%6F%6F=bar&%62%61%7A=quux&xyzzy=%74h%75d',
+ encodelast: 'foo=bar&baz=quux&xyzzy=thu%64',
+ multivalue: 'foo=bar&foo=baz&foo=quux&quuy=quuz',
+ multivaluemany: 'foo=bar&foo=baz&foo=quux&quuy=quuz&foo=abc&foo=def&' +
+ 'foo=ghi&foo=jkl&foo=mno&foo=pqr&foo=stu&foo=vwxyz',
+ manypairs: 'a&b&c&d&e&f&g&h&i&j&k&l&m&n&o&p&q&r&s&t&u&v&w&x&y&z',
+ manyblankpairs: '&&&&&&&&&&&&&&&&&&&&&&&&',
+ altspaces: 'foo+bar=baz+quux&xyzzy+thud=quuy+quuz&abc=def+ghi'
+};
diff --git a/benchmark/fs/bench-statSync.js b/benchmark/fs/bench-statSync.js
index ba1e8168b4aaf5..4bc2ecd65a3624 100644
--- a/benchmark/fs/bench-statSync.js
+++ b/benchmark/fs/bench-statSync.js
@@ -5,17 +5,35 @@ const fs = require('fs');
const bench = common.createBenchmark(main, {
n: [1e4],
- kind: ['lstatSync', 'statSync']
+ kind: ['fstatSync', 'lstatSync', 'statSync']
});
function main(conf) {
const n = conf.n >>> 0;
- const fn = fs[conf.kind];
-
- bench.start();
- for (var i = 0; i < n; i++) {
- fn(__filename);
+ var fn;
+ var i;
+ switch (conf.kind) {
+ case 'statSync':
+ case 'lstatSync':
+ fn = fs[conf.kind];
+ bench.start();
+ for (i = 0; i < n; i++) {
+ fn(__filename);
+ }
+ bench.end(n);
+ break;
+ case 'fstatSync':
+ fn = fs.fstatSync;
+ const fd = fs.openSync(__filename, 'r');
+ bench.start();
+ for (i = 0; i < n; i++) {
+ fn(fd);
+ }
+ bench.end(n);
+ fs.closeSync(fd);
+ break;
+ default:
+ throw new Error('Invalid kind argument');
}
- bench.end(n);
}
diff --git a/benchmark/os/loadavg.js b/benchmark/os/loadavg.js
new file mode 100644
index 00000000000000..6e3c57ed44b777
--- /dev/null
+++ b/benchmark/os/loadavg.js
@@ -0,0 +1,17 @@
+'use strict';
+
+const common = require('../common.js');
+const loadavg = require('os').loadavg;
+
+const bench = common.createBenchmark(main, {
+ n: [5e6]
+});
+
+function main(conf) {
+ const n = +conf.n;
+
+ bench.start();
+ for (var i = 0; i < n; ++i)
+ loadavg();
+ bench.end(n);
+}
diff --git a/benchmark/process/memoryUsage.js b/benchmark/process/memoryUsage.js
new file mode 100644
index 00000000000000..d68ef339b4d10b
--- /dev/null
+++ b/benchmark/process/memoryUsage.js
@@ -0,0 +1,16 @@
+'use strict';
+
+var common = require('../common.js');
+var bench = common.createBenchmark(main, {
+ n: [1e5]
+});
+
+function main(conf) {
+ var n = +conf.n;
+
+ bench.start();
+ for (var i = 0; i < n; i++) {
+ process.memoryUsage();
+ }
+ bench.end(n);
+}
diff --git a/benchmark/querystring/querystring-parse.js b/benchmark/querystring/querystring-parse.js
index fe14d95a53f0a0..2e2ec1417bcd35 100644
--- a/benchmark/querystring/querystring-parse.js
+++ b/benchmark/querystring/querystring-parse.js
@@ -3,17 +3,7 @@ var common = require('../common.js');
var querystring = require('querystring');
var v8 = require('v8');
-var inputs = {
- noencode: 'foo=bar&baz=quux&xyzzy=thud',
- multicharsep: 'foo=bar&&&&&&&&&&baz=quux&&&&&&&&&&xyzzy=thud',
- encodefake: 'foo=%©ar&baz=%A©uux&xyzzy=%©ud',
- encodemany: '%66%6F%6F=bar&%62%61%7A=quux&xyzzy=%74h%75d',
- encodelast: 'foo=bar&baz=quux&xyzzy=thu%64',
- multivalue: 'foo=bar&foo=baz&foo=quux&quuy=quuz',
- multivaluemany: 'foo=bar&foo=baz&foo=quux&quuy=quuz&foo=abc&foo=def&' +
- 'foo=ghi&foo=jkl&foo=mno&foo=pqr&foo=stu&foo=vwxyz',
- manypairs: 'a&b&c&d&e&f&g&h&i&j&k&l&m&n&o&p&q&r&s&t&u&v&w&x&y&z'
-};
+var inputs = require('../fixtures/url-inputs.js').searchParams;
var bench = common.createBenchmark(main, {
type: Object.keys(inputs),
diff --git a/benchmark/url/legacy-vs-whatwg-url-get-prop.js b/benchmark/url/legacy-vs-whatwg-url-get-prop.js
index f703b75b16f6f7..ffc8b4995df3de 100644
--- a/benchmark/url/legacy-vs-whatwg-url-get-prop.js
+++ b/benchmark/url/legacy-vs-whatwg-url-get-prop.js
@@ -3,19 +3,7 @@ const common = require('../common.js');
const url = require('url');
const URL = url.URL;
const assert = require('assert');
-
-const inputs = {
- long: 'http://nodejs.org:89/docs/latest/api/url.html#test?' +
- 'payload1=true&payload2=false&test=1&benchmark=3&' +
- 'foo=38.38.011.293&bar=1234834910480&test=19299&3992&' +
- 'key=f5c65e1e98fe07e648249ad41e1cfdb0',
- short: 'https://nodejs.org/en/blog/',
- idn: 'http://你好你好',
- auth: 'https://user:pass@example.com/path?search=1',
- special: 'file:///foo/bar/test/node.js',
- percent: 'https://%E4%BD%A0/foo',
- dot: 'https://example.org/./a/../b/./c'
-};
+const inputs = require('../fixtures/url-inputs.js').urls;
const bench = common.createBenchmark(main, {
type: Object.keys(inputs),
diff --git a/benchmark/url/legacy-vs-whatwg-url-parse.js b/benchmark/url/legacy-vs-whatwg-url-parse.js
index 64533e67e4a46a..ec386b7b85597d 100644
--- a/benchmark/url/legacy-vs-whatwg-url-parse.js
+++ b/benchmark/url/legacy-vs-whatwg-url-parse.js
@@ -3,19 +3,7 @@ const common = require('../common.js');
const url = require('url');
const URL = url.URL;
const assert = require('assert');
-
-const inputs = {
- long: 'http://nodejs.org:89/docs/latest/api/url.html#test?' +
- 'payload1=true&payload2=false&test=1&benchmark=3&' +
- 'foo=38.38.011.293&bar=1234834910480&test=19299&3992&' +
- 'key=f5c65e1e98fe07e648249ad41e1cfdb0',
- short: 'https://nodejs.org/en/blog/',
- idn: 'http://你好你好',
- auth: 'https://user:pass@example.com/path?search=1',
- special: 'file:///foo/bar/test/node.js',
- percent: 'https://%E4%BD%A0/foo',
- dot: 'https://example.org/./a/../b/./c'
-};
+const inputs = require('../fixtures/url-inputs.js').urls;
const bench = common.createBenchmark(main, {
type: Object.keys(inputs),
diff --git a/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js b/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js
index e76f2dd837bb97..86714df6c196a7 100644
--- a/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js
+++ b/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js
@@ -2,18 +2,7 @@
const common = require('../common.js');
const { URLSearchParams } = require('url');
const querystring = require('querystring');
-
-const inputs = {
- noencode: 'foo=bar&baz=quux&xyzzy=thud',
- encodemany: '%66%6F%6F=bar&%62%61%7A=quux&xyzzy=%74h%75d',
- encodefake: 'foo=%©ar&baz=%A©uux&xyzzy=%©ud',
- encodelast: 'foo=bar&baz=quux&xyzzy=thu%64',
- multicharsep: 'foo=bar&&&&&&&&&&baz=quux&&&&&&&&&&xyzzy=thud',
- multivalue: 'foo=bar&foo=baz&foo=quux&quuy=quuz',
- multivaluemany: 'foo=bar&foo=baz&foo=quux&quuy=quuz&foo=abc&foo=def&' +
- 'foo=ghi&foo=jkl&foo=mno&foo=pqr&foo=stu&foo=vwxyz',
- manypairs: 'a&b&c&d&e&f&g&h&i&j&k&l&m&n&o&p&q&r&s&t&u&v&w&x&y&z'
-};
+const inputs = require('../fixtures/url-inputs.js').searchParams;
const bench = common.createBenchmark(main, {
type: Object.keys(inputs),
diff --git a/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js b/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
index ef3160eccd49e6..7e56b5fba6e4f8 100644
--- a/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
+++ b/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
@@ -2,18 +2,7 @@
const common = require('../common.js');
const { URLSearchParams } = require('url');
const querystring = require('querystring');
-
-const inputs = {
- noencode: 'foo=bar&baz=quux&xyzzy=thud',
- encodemany: '%66%6F%6F=bar&%62%61%7A=quux&xyzzy=%74h%75d',
- encodefake: 'foo=%©ar&baz=%A©uux&xyzzy=%©ud',
- encodelast: 'foo=bar&baz=quux&xyzzy=thu%64',
- multicharsep: 'foo=bar&&&&&&&&&&baz=quux&&&&&&&&&&xyzzy=thud',
- multivalue: 'foo=bar&foo=baz&foo=quux&quuy=quuz',
- multivaluemany: 'foo=bar&foo=baz&foo=quux&quuy=quuz&foo=abc&foo=def&' +
- 'foo=ghi&foo=jkl&foo=mno&foo=pqr&foo=stu&foo=vwxyz',
- manypairs: 'a&b&c&d&e&f&g&h&i&j&k&l&m&n&o&p&q&r&s&t&u&v&w&x&y&z'
-};
+const inputs = require('../fixtures/url-inputs.js').searchParams;
const bench = common.createBenchmark(main, {
type: Object.keys(inputs),
diff --git a/benchmark/url/legacy-vs-whatwg-url-serialize.js b/benchmark/url/legacy-vs-whatwg-url-serialize.js
index c0b7f5a6ce1565..911e79794b84fb 100644
--- a/benchmark/url/legacy-vs-whatwg-url-serialize.js
+++ b/benchmark/url/legacy-vs-whatwg-url-serialize.js
@@ -3,19 +3,7 @@ const common = require('../common.js');
const url = require('url');
const URL = url.URL;
const assert = require('assert');
-
-const inputs = {
- long: 'http://nodejs.org:89/docs/latest/api/url.html#test?' +
- 'payload1=true&payload2=false&test=1&benchmark=3&' +
- 'foo=38.38.011.293&bar=1234834910480&test=19299&3992&' +
- 'key=f5c65e1e98fe07e648249ad41e1cfdb0',
- short: 'https://nodejs.org/en/blog/',
- idn: 'http://你好你好',
- auth: 'https://user:pass@example.com/path?search=1',
- special: 'file:///foo/bar/test/node.js',
- percent: 'https://%E4%BD%A0/foo',
- dot: 'https://example.org/./a/../b/./c'
-};
+const inputs = require('../fixtures/url-inputs.js').urls;
const bench = common.createBenchmark(main, {
type: Object.keys(inputs),
diff --git a/benchmark/url/url-format.js b/benchmark/url/url-format.js
index 3f7df8a0bc4536..886958406b91c5 100644
--- a/benchmark/url/url-format.js
+++ b/benchmark/url/url-format.js
@@ -3,8 +3,13 @@ const common = require('../common.js');
const url = require('url');
const v8 = require('v8');
+const inputs = {
+ slashes: {slashes: true, host: 'localhost'},
+ file: {protocol: 'file:', pathname: '/foo'},
+};
+
const bench = common.createBenchmark(main, {
- type: 'one two'.split(' '),
+ type: Object.keys(inputs),
n: [25e6]
});
@@ -12,10 +17,6 @@ function main(conf) {
const type = conf.type;
const n = conf.n | 0;
- const inputs = {
- one: {slashes: true, host: 'localhost'},
- two: {protocol: 'file:', pathname: '/foo'},
- };
const input = inputs[type] || '';
// Force-optimize url.format() so that the benchmark doesn't get
diff --git a/benchmark/url/url-parse.js b/benchmark/url/url-parse.js
deleted file mode 100644
index 89679548b8f193..00000000000000
--- a/benchmark/url/url-parse.js
+++ /dev/null
@@ -1,37 +0,0 @@
-'use strict';
-var common = require('../common.js');
-var url = require('url');
-var v8 = require('v8');
-
-var bench = common.createBenchmark(main, {
- type: 'one two three four five six'.split(' '),
- n: [25e4]
-});
-
-function main(conf) {
- var type = conf.type;
- var n = conf.n | 0;
-
- var inputs = {
- one: 'http://nodejs.org/docs/latest/api/url.html#url_url_format_urlobj',
- two: 'http://blog.nodejs.org/',
- three: 'https://encrypted.google.com/search?q=url&q=site:npmjs.org&hl=en',
- four: 'javascript:alert("node is awesome");',
- five: 'some.ran/dom/url.thing?oh=yes#whoo',
- six: 'https://user:pass@example.com/',
- };
- var input = inputs[type] || '';
-
- // Force-optimize url.parse() so that the benchmark doesn't get
- // disrupted by the optimizer kicking in halfway through.
- for (var name in inputs)
- url.parse(inputs[name]);
-
- v8.setFlagsFromString('--allow_natives_syntax');
- eval('%OptimizeFunctionOnNextCall(url.parse)');
-
- bench.start();
- for (var i = 0; i < n; i += 1)
- url.parse(input);
- bench.end(n);
-}
diff --git a/benchmark/url/url-resolve.js b/benchmark/url/url-resolve.js
index 8372132e4d269e..4335511ca6d2df 100644
--- a/benchmark/url/url-resolve.js
+++ b/benchmark/url/url-resolve.js
@@ -1,36 +1,28 @@
'use strict';
-var common = require('../common.js');
-var url = require('url');
-var v8 = require('v8');
+const common = require('../common.js');
+const url = require('url');
+const v8 = require('v8');
+const hrefs = require('../fixtures/url-inputs.js').urls;
+hrefs.noscheme = 'some.ran/dom/url.thing?oh=yes#whoo';
-var hrefs = [
- 'http://example.com/',
- 'http://nodejs.org/docs/latest/api/url.html#url_url_format_urlobj',
- 'http://blog.nodejs.org/',
- 'https://encrypted.google.com/search?q=url&q=site:npmjs.org&hl=en',
- 'javascript:alert("node is awesome");',
- 'some.ran/dom/url.thing?oh=yes#whoo'
-];
+const paths = {
+ 'up': '../../../../../etc/passwd',
+ 'sibling': '../foo/bar?baz=boom',
+ 'foo/bar': 'foo/bar',
+ 'withscheme': 'http://nodejs.org',
+ 'down': './foo/bar?baz'
+};
-
-var paths = [
- '../../../../../etc/passwd',
- '../foo/bar?baz=boom',
- 'foo/bar',
- 'http://nodejs.org',
- './foo/bar?baz'
-];
-
-var bench = common.createBenchmark(main, {
+const bench = common.createBenchmark(main, {
href: Object.keys(hrefs),
path: Object.keys(paths),
n: [1e5]
});
function main(conf) {
- var n = conf.n | 0;
- var href = hrefs[conf.href];
- var path = paths[conf.path];
+ const n = conf.n | 0;
+ const href = hrefs[conf.href];
+ const path = paths[conf.path];
// Force-optimize url.resolve() so that the benchmark doesn't get
// disrupted by the optimizer kicking in halfway through.
diff --git a/benchmark/url/url-searchparams-sort.js b/benchmark/url/url-searchparams-sort.js
new file mode 100644
index 00000000000000..677ce511cf3ea2
--- /dev/null
+++ b/benchmark/url/url-searchparams-sort.js
@@ -0,0 +1,48 @@
+'use strict';
+const common = require('../common.js');
+const URLSearchParams = require('url').URLSearchParams;
+
+const inputs = {
+ empty: '',
+ sorted: 'a&b&c&d&e&f&g&h&i&j&k&l&m&n&o&p&q&r&s&t&u&v&w&x&y&z',
+ almostsorted: 'a&b&c&d&e&f&g&i&h&j&k&l&m&n&o&p&q&r&s&t&u&w&v&x&y&z',
+ reversed: 'z&y&x&w&v&u&t&s&r&q&p&o&n&m&l&k&j&i&h&g&f&e&d&c&b&a',
+ random: 'm&t&d&c&z&v&a&n&p&y&u&o&h&l&f&j&e&q&b&i&s&x&k&w&r&g',
+ // 8 parameters
+ short: 'm&t&d&c&z&v&a&n',
+ // 88 parameters
+ long: 'g&r&t&h&s&r&d&w&b&n&h&k&x&m&k&h&o&e&x&c&c&g&e&b&p&p&s&n&j&b&y&z&' +
+ 'u&l&o&r&w&a&u&l&m&f&j&q&p&f&e&y&e&n&e&l&m&w&u&w&t&n&t&q&v&y&c&o&' +
+ 'k&f&j&i&l&m&g&j&d&i&z&q&p&x&q&q&d&n&y&w&g&i&v&r'
+};
+
+function getParams(str) {
+ const out = [];
+ for (const key of str.split('&')) {
+ out.push(key, '');
+ }
+ return out;
+}
+
+const bench = common.createBenchmark(main, {
+ type: Object.keys(inputs),
+ n: [1e6]
+}, {
+ flags: ['--expose-internals']
+});
+
+function main(conf) {
+ const searchParams = require('internal/url').searchParamsSymbol;
+ const input = inputs[conf.type];
+ const n = conf.n | 0;
+ const params = new URLSearchParams();
+ const array = getParams(input);
+
+ var i;
+ bench.start();
+ for (i = 0; i < n; i++) {
+ params[searchParams] = array.slice();
+ params.sort();
+ }
+ bench.end(n);
+}
diff --git a/benchmark/url/whatwg-url-idna.js b/benchmark/url/whatwg-url-idna.js
new file mode 100644
index 00000000000000..41b4c639de97b6
--- /dev/null
+++ b/benchmark/url/whatwg-url-idna.js
@@ -0,0 +1,47 @@
+'use strict';
+const common = require('../common.js');
+const { domainToASCII, domainToUnicode } = require('url');
+
+const inputs = {
+ empty: {
+ ascii: '',
+ unicode: ''
+ },
+ none: {
+ ascii: 'passports',
+ unicode: 'passports'
+ },
+ some: {
+ ascii: 'Paßstraße',
+ unicode: 'xn--Pastrae-1vae'
+ },
+ all: {
+ ascii: '他们不说中文',
+ unicode: 'xn--ihqwczyycu19kkg2c'
+ },
+ nonstring: {
+ ascii: { toString() { return ''; } },
+ unicode: { toString() { return ''; } }
+ }
+};
+
+const bench = common.createBenchmark(main, {
+ input: Object.keys(inputs),
+ to: ['ascii', 'unicode'],
+ n: [5e6]
+});
+
+function main(conf) {
+ const n = conf.n | 0;
+ const to = conf.to;
+ const input = inputs[conf.input][to];
+ const method = to === 'ascii' ? domainToASCII : domainToUnicode;
+
+ common.v8ForceOptimization(method, input);
+
+ bench.start();
+ for (var i = 0; i < n; i++) {
+ method(input);
+ }
+ bench.end(n);
+}
diff --git a/benchmark/url/whatwg-url-properties.js b/benchmark/url/whatwg-url-properties.js
index 375939c601d363..9bdc9778a8c922 100644
--- a/benchmark/url/whatwg-url-properties.js
+++ b/benchmark/url/whatwg-url-properties.js
@@ -1,16 +1,11 @@
'use strict';
+const common = require('../common.js');
+const URL = require('url').URL;
+const inputs = require('../fixtures/url-inputs.js').urls;
-var common = require('../common.js');
-var URL = require('url').URL;
-
-var bench = common.createBenchmark(main, {
- url: [
- 'http://example.com/',
- 'https://encrypted.google.com/search?q=url&q=site:npmjs.org&hl=en',
- 'javascript:alert("node is awesome");',
- 'http://user:pass@foo.bar.com:21/aaa/zzz?l=24#test'
- ],
- prop: ['toString', 'href', 'origin', 'protocol',
+const bench = common.createBenchmark(main, {
+ input: Object.keys(inputs),
+ prop: ['href', 'origin', 'protocol',
'username', 'password', 'host', 'hostname', 'port',
'pathname', 'search', 'searchParams', 'hash'],
n: [1e4]
@@ -34,14 +29,6 @@ function get(n, url, prop) {
bench.end(n);
}
-function stringify(n, url, prop) {
- bench.start();
- for (var i = 0; i < n; i += 1) {
- url.toString();
- }
- bench.end(n);
-}
-
const alternatives = {
href: 'http://user:pass@foo.bar.com:21/aaa/zzz?l=25#test',
protocol: 'https:',
@@ -61,7 +48,8 @@ function getAlternative(prop) {
function main(conf) {
const n = conf.n | 0;
- const url = new URL(conf.url);
+ const input = inputs[conf.input];
+ const url = new URL(input);
const prop = conf.prop;
switch (prop) {
@@ -74,17 +62,13 @@ function main(conf) {
case 'pathname':
case 'search':
case 'hash':
+ case 'href':
setAndGet(n, url, prop, getAlternative(prop));
break;
- // TODO: move href to the first group when the setter lands.
- case 'href':
case 'origin':
case 'searchParams':
get(n, url, prop);
break;
- case 'toString':
- stringify(n, url);
- break;
default:
throw new Error('Unknown prop');
}
diff --git a/common.gypi b/common.gypi
index 0fa36197be97a9..a52915d59a7ac4 100644
--- a/common.gypi
+++ b/common.gypi
@@ -42,12 +42,19 @@
'os_posix': 1,
'v8_postmortem_support%': 'true',
}],
- ['GENERATOR == "ninja" or OS== "mac"', {
+ ['OS== "mac"', {
'OBJ_DIR': '<(PRODUCT_DIR)/obj',
'V8_BASE': '<(PRODUCT_DIR)/libv8_base.a',
}, {
- 'OBJ_DIR': '<(PRODUCT_DIR)/obj.target',
- 'V8_BASE': '<(PRODUCT_DIR)/obj.target/deps/v8/src/libv8_base.a',
+ 'conditions': [
+ ['GENERATOR=="ninja"', {
+ 'OBJ_DIR': '<(PRODUCT_DIR)/obj',
+ 'V8_BASE': '<(PRODUCT_DIR)/obj/deps/v8/src/libv8_base.a',
+ }, {
+ 'OBJ_DIR': '<(PRODUCT_DIR)/obj.target',
+ 'V8_BASE': '<(PRODUCT_DIR)/obj.target/deps/v8/src/libv8_base.a',
+ }],
+ ],
}],
['openssl_fips != ""', {
'OPENSSL_PRODUCT': 'libcrypto.a',
diff --git a/configure b/configure
index d0b50f80f9f07d..50078df43a9ed5 100755
--- a/configure
+++ b/configure
@@ -2,7 +2,16 @@
import sys
if sys.version_info[0] != 2 or sys.version_info[1] not in (6, 7):
- sys.stdout.write("Please use either Python 2.6 or 2.7\n")
+ sys.stderr.write('Please use either Python 2.6 or 2.7')
+
+ from distutils.spawn import find_executable as which
+ python2 = which('python2') or which('python2.6') or which('python2.7')
+
+ if python2:
+ sys.stderr.write(':\n\n')
+ sys.stderr.write(' ' + python2 + ' ' + ' '.join(sys.argv))
+
+ sys.stderr.write('\n')
sys.exit(1)
import errno
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index b216cf04c30a00..f2c738770a43b4 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 5
#define V8_BUILD_NUMBER 372
-#define V8_PATCH_LEVEL 40
+#define V8_PATCH_LEVEL 41
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index a2eb7b8f22c9b8..1a3523745b9fae 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -245,7 +245,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index f1aa332a493ce4..c4cd3c92993ac4 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -343,7 +343,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
diff --git a/doc/STYLE_GUIDE.md b/doc/STYLE_GUIDE.md
new file mode 100644
index 00000000000000..10f26421a4ceb5
--- /dev/null
+++ b/doc/STYLE_GUIDE.md
@@ -0,0 +1,63 @@
+# Style Guide
+
+* Documents are written in markdown files.
+* Those files should be written in **`lowercase-with-dashes.md`.**
+ * Underscores in filenames are allowed only when they are present in the
+ topic the document will describe (e.g., `child_process`.)
+ * Filenames should be **lowercase**.
+ * Some files, such as top-level markdown files, are exceptions.
+ * Older files may use the `.markdown` extension. These may be ported to `.md`
+ at will. **Prefer `.md` for all new documents.**
+* Documents should be word-wrapped at 80 characters.
+* The formatting described in `.editorconfig` is preferred.
+ * A [plugin][] is available for some editors to automatically apply these rules.
+* Mechanical issues, like spelling and grammar, should be identified by tools,
+ insofar as is possible. If not caught by a tool, they should be pointed out by
+ human reviewers.
+* American English spelling is preferred. "Capitalize" vs. "Capitalise",
+ "color" vs. "colour", etc.
+* Though controversial, the [Oxford comma][] is preferred for clarity's sake.
+* Generally avoid personal pronouns in reference documentation ("I", "you",
+ "we".)
+ * Pronouns are acceptable in more colloquial documentation, like guides.
+ * Use **gender-neutral pronouns** and **mass nouns**. Non-comprehensive
+ examples:
+ * **OK**: "they", "their", "them", "folks", "people", "developers", "cats"
+ * **NOT OK**: "his", "hers", "him", "her", "guys", "dudes".
+* When combining wrapping elements (parentheses and quotes), terminal
+ punctuation should be placed:
+ * Inside the wrapping element if the wrapping element contains a complete
+ clause — a subject, verb, and an object.
+ * Outside of the wrapping element if the wrapping element contains only a
+ fragment of a clause.
+* Place end-of-sentence punctuation inside wrapping elements — periods go
+ inside parentheses and quotes, not after.
+* Documents must start with a level-one heading. An example document will be
+ linked here eventually.
+* Prefer affixing links to inlining links — prefer `[a link][]` to
+ `[a link](http://example.com)`.
+* When documenting APIs, note the version the API was introduced in at
+ the end of the section. If an API has been deprecated, also note the first
+ version that the API appeared deprecated in.
+* When using dashes, use em dashes ("—", Option+Shift+"-" on macOS)
+ surrounded by spaces, per the New York Times usage.
+* Including assets:
+ * If you wish to add an illustration or full program, add it to the
+ appropriate sub-directory in the `assets/` dir.
+ * Link to it like so: `[Asset](/assets/{subdir}/{filename})` for file-based
+ assets, and `![Asset](/assets/{subdir}/{filename})` for image-based assets.
+ * For illustrations, prefer SVG to other assets. When SVG is not feasible,
+ please keep a close eye on the filesize of the asset you're introducing.
+* For code blocks:
+ * Use language-aware fences. ("```js")
+ * Code need not be complete — treat code blocks as an illustration or aid to
+ your point, not as complete running programs. If a complete running program
+ is necessary, include it as an asset in `assets/code-examples` and link to
+ it.
+* When using underscores, asterisks, and backticks, please use proper escaping
+ (**\\\_**, **\\\***, and **\\\`** instead of **\_**, **\***, and **\`**).
+* References to constructor functions should use PascalCase.
+* References to constructor instances should be camelCased.
+* References to methods should be used with parentheses: `socket.end()`
+ instead of `socket.end`.
+
+[plugin]: http://editorconfig.org/#download
+[Oxford comma]: https://en.wikipedia.org/wiki/Serial_comma
diff --git a/doc/api/_toc.md b/doc/api/_toc.md
index 2527ad84e2e360..345e8e393145fd 100644
--- a/doc/api/_toc.md
+++ b/doc/api/_toc.md
@@ -35,6 +35,7 @@
* [String Decoder](string_decoder.html)
* [Timers](timers.html)
* [TLS/SSL](tls.html)
+* [Tracing](tracing.html)
* [TTY](tty.html)
* [UDP/Datagram](dgram.html)
* [URL](url.html)
diff --git a/doc/api/all.md b/doc/api/all.md
index 93c7a300a162e9..f65b24587511a1 100644
--- a/doc/api/all.md
+++ b/doc/api/all.md
@@ -1,15 +1,15 @@
@include documentation
@include synopsis
-@include addons
@include assert
@include buffer
+@include addons
@include child_process
@include cluster
@include cli
@include console
@include crypto
@include debugger
-@include dgram
+@include deprecations
@include dns
@include domain
@include errors
@@ -31,7 +31,9 @@
@include string_decoder
@include timers
@include tls
+@include tracing
@include tty
+@include dgram
@include url
@include util
@include v8
diff --git a/doc/api/assert.md b/doc/api/assert.md
index 8c4279f5d03d22..8478c235e783b6 100644
--- a/doc/api/assert.md
+++ b/doc/api/assert.md
@@ -1,6 +1,6 @@
# Assert
-> Stability: 3 - Locked
+> Stability: 2 - Stable
The `assert` module provides a simple set of assertion tests that can be used to
test invariants.
@@ -9,6 +9,8 @@ test invariants.
+* `value` {any}
+* `message` {any}
An alias of [`assert.ok()`][] .
@@ -30,7 +32,20 @@ assert(false, 'it\'s false');
## assert.deepEqual(actual, expected[, message])
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests for deep equality between the `actual` and `expected` parameters.
Primitive values are compared with the equal comparison operator ( `==` ).
@@ -91,7 +106,20 @@ parameter is undefined, a default error message is assigned.
## assert.deepStrictEqual(actual, expected[, message])
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Generally identical to `assert.deepEqual()` with two exceptions. First,
primitive values are compared using the strict equality operator ( `===` ).
@@ -115,7 +143,17 @@ parameter is undefined, a default error message is assigned.
## assert.doesNotThrow(block[, error][, message])
+* `block` {Function}
+* `error` {RegExp|Function}
+* `message` {any}
Asserts that the function `block` does not throw an error. See
[`assert.throws()`][] for more details.
@@ -171,6 +209,9 @@ assert.doesNotThrow(
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests shallow, coercive equality between the `actual` and `expected` parameters
using the equal comparison operator ( `==` ).
@@ -197,6 +238,10 @@ parameter is undefined, a default error message is assigned.
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
+* `operator` {String}
Throws an `AssertionError`. If `message` is falsy, the error message is set as
the values of `actual` and `expected` separated by the provided `operator`.
@@ -216,6 +261,7 @@ assert.fail(1, 2, 'whoops', '>');
+* `value` {any}
Throws `value` if `value` is truthy. This is useful when testing the `error`
argument in callbacks.
@@ -237,6 +283,9 @@ assert.ifError(new Error());
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests for any deep inequality. Opposite of [`assert.deepEqual()`][].
@@ -281,6 +330,9 @@ parameter is undefined, a default error message is assigned.
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests for deep strict inequality. Opposite of [`assert.deepStrictEqual()`][].
@@ -302,6 +354,9 @@ the `message` parameter is undefined, a default error message is assigned.
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests shallow, coercive inequality with the not equal comparison operator
( `!=` ).
@@ -327,6 +382,9 @@ parameter is undefined, a default error message is assigned.
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests strict inequality as determined by the strict not equal operator
( `!==` ).
@@ -352,6 +410,8 @@ If the values are strictly equal, an `AssertionError` is thrown with a
+* `value` {any}
+* `message` {any}
Tests if `value` is truthy. It is equivalent to
`assert.equal(!!value, true, message)`.
@@ -379,6 +439,9 @@ assert.ok(false, 'it\'s false');
+* `actual` {any}
+* `expected` {any}
+* `message` {any}
Tests strict equality as determined by the strict equality operator ( `===` ).
@@ -402,7 +465,14 @@ If the values are not strictly equal, an `AssertionError` is thrown with a
## assert.throws(block[, error][, message])
+* `block` {Function}
+* `error` {RegExp|Function}
+* `message` {any}
Expects the function `block` to throw an error.
diff --git a/doc/api/buffer.md b/doc/api/buffer.md
index 14212d3ed60d03..9aa7d0d9cbbbda 100644
--- a/doc/api/buffer.md
+++ b/doc/api/buffer.md
@@ -138,6 +138,15 @@ extra care *must* be taken in order to avoid introducing security
vulnerabilities into an application.
## Buffers and Character Encodings
+
`Buffer` instances are commonly used to represent sequences of encoded characters
such as UTF-8, UCS2, Base64 or even Hex-encoded data. It is possible to
@@ -188,6 +197,12 @@ that the server actually returned win-1252-encoded data, and using `'latin1'`
encoding may incorrectly decode the characters.
## Buffers and TypedArray
+
`Buffer` instances are also [`Uint8Array`] instances. However, there are subtle
incompatibilities with the TypedArray specification in ECMAScript 2015.
@@ -298,6 +313,13 @@ It can be constructed in a variety of ways.
### new Buffer(array)
> Stability: 0 - Deprecated: Use [`Buffer.from(array)`] instead.
@@ -313,35 +335,20 @@ Example:
const buf = new Buffer([0x62, 0x75, 0x66, 0x66, 0x65, 0x72]);
```
-### new Buffer(buffer)
-
-
-> Stability: 0 - Deprecated: Use [`Buffer.from(buffer)`] instead.
-
-* `buffer` {Buffer} An existing `Buffer` to copy data from
-
-Copies the passed `buffer` data onto a new `Buffer` instance.
-
-Example:
-
-```js
-const buf1 = new Buffer('buffer');
-const buf2 = new Buffer(buf1);
-
-buf1[0] = 0x61;
-
-// Prints: auffer
-console.log(buf1.toString());
-
-// Prints: buffer
-console.log(buf2.toString());
-```
-
### new Buffer(arrayBuffer[, byteOffset [, length]])
> Stability: 0 - Deprecated: Use
@@ -383,9 +390,49 @@ arr[1] = 6000;
console.log(buf);
```
+### new Buffer(buffer)
+
+
+> Stability: 0 - Deprecated: Use [`Buffer.from(buffer)`] instead.
+
+* `buffer` {Buffer} An existing `Buffer` to copy data from
+
+Copies the passed `buffer` data onto a new `Buffer` instance.
+
+Example:
+
+```js
+const buf1 = new Buffer('buffer');
+const buf2 = new Buffer(buf1);
+
+buf1[0] = 0x61;
+
+// Prints: auffer
+console.log(buf1.toString());
+
+// Prints: buffer
+console.log(buf2.toString());
+```
+
### new Buffer(size)
> Stability: 0 - Deprecated: Use [`Buffer.alloc()`] instead (also see
@@ -419,6 +466,13 @@ console.log(buf);
### new Buffer(string[, encoding])
> Stability: 0 - Deprecated:
@@ -508,6 +562,10 @@ A `TypeError` will be thrown if `size` is not a number.
### Class Method: Buffer.allocUnsafe(size)
* `size` {Integer} The desired length of the new `Buffer`
@@ -606,6 +664,14 @@ A `TypeError` will be thrown if `size` is not a number.
### Class Method: Buffer.byteLength(string[, encoding])
* `string` {String | Buffer | TypedArray | DataView | ArrayBuffer} A value to
@@ -886,6 +952,10 @@ console.log(buf.toString('ascii'));
### buf.compare(target[, targetStart[, targetEnd[, sourceStart[, sourceEnd]]]])
* `target` {Buffer} A `Buffer` to compare to
@@ -964,7 +1034,7 @@ A `RangeError` will be thrown if: `targetStart < 0`, `sourceStart < 0`,
added: v0.1.90
-->
-* `target` {Buffer} A `Buffer` to copy into.
+* `target` {Buffer|Uint8Array} A `Buffer` or [`Uint8Array`] to copy into.
* `targetStart` {Integer} The offset within `target` at which to begin
copying to. **Default:** `0`
* `sourceStart` {Integer} The offset within `buf` at which to begin copying from.
@@ -1066,6 +1136,10 @@ console.log(buf1.equals(buf3));
### buf.fill(value[, offset[, end]][, encoding])
* `value` {String | Buffer | Integer} The value to fill `buf` with
@@ -1100,9 +1174,55 @@ Example: Fill a `Buffer` with a two-byte character
console.log(Buffer.allocUnsafe(3).fill('\u0222'));
```
+### buf.includes(value[, byteOffset][, encoding])
+
+
+* `value` {String | Buffer | Integer} What to search for
+* `byteOffset` {Integer} Where to begin searching in `buf`. **Default:** `0`
+* `encoding` {String} If `value` is a string, this is its encoding.
+ **Default:** `'utf8'`
+* Returns: {Boolean} `true` if `value` was found in `buf`, `false` otherwise
+
+Equivalent to [`buf.indexOf() !== -1`][`buf.indexOf()`].
+
+Examples:
+
+```js
+const buf = Buffer.from('this is a buffer');
+
+// Prints: true
+console.log(buf.includes('this'));
+
+// Prints: true
+console.log(buf.includes('is'));
+
+// Prints: true
+console.log(buf.includes(Buffer.from('a buffer')));
+
+// Prints: true
+// (97 is the decimal ASCII value for 'a')
+console.log(buf.includes(97));
+
+// Prints: false
+console.log(buf.includes(Buffer.from('a buffer example')));
+
+// Prints: true
+console.log(buf.includes(Buffer.from('a buffer example').slice(0, 8)));
+
+// Prints: false
+console.log(buf.includes('this', 4));
+```
+
### buf.indexOf(value[, byteOffset][, encoding])
* `value` {String | Buffer | Integer} What to search for
@@ -1179,47 +1299,6 @@ console.log(b.indexOf('b', null));
console.log(b.indexOf('b', []));
```
-### buf.includes(value[, byteOffset][, encoding])
-
-
-* `value` {String | Buffer | Integer} What to search for
-* `byteOffset` {Integer} Where to begin searching in `buf`. **Default:** `0`
-* `encoding` {String} If `value` is a string, this is its encoding.
- **Default:** `'utf8'`
-* Returns: {Boolean} `true` if `value` was found in `buf`, `false` otherwise
-
-Equivalent to [`buf.indexOf() !== -1`][`buf.indexOf()`].
-
-Examples:
-
-```js
-const buf = Buffer.from('this is a buffer');
-
-// Prints: true
-console.log(buf.includes('this'));
-
-// Prints: true
-console.log(buf.includes('is'));
-
-// Prints: true
-console.log(buf.includes(Buffer.from('a buffer')));
-
-// Prints: true
-// (97 is the decimal ASCII value for 'a')
-console.log(buf.includes(97));
-
-// Prints: false
-console.log(buf.includes(Buffer.from('a buffer example')));
-
-// Prints: true
-console.log(buf.includes(Buffer.from('a buffer example').slice(0, 8)));
-
-// Prints: false
-console.log(buf.includes('this', 4));
-```
-
### buf.keys()
* `start` {Integer} Where the new `Buffer` will start. **Default:** `0`
@@ -1859,6 +1947,35 @@ buf2.swap64();
Note that JavaScript cannot encode 64-bit integers. This method is intended
for working with 64-bit floats.
+### buf.toJSON()
+
+
+* Returns: {Object}
+
+Returns a JSON representation of `buf`. [`JSON.stringify()`] implicitly calls
+this function when stringifying a `Buffer` instance.
+
+Example:
+
+```js
+const buf = Buffer.from([0x1, 0x2, 0x3, 0x4, 0x5]);
+const json = JSON.stringify(buf);
+
+// Prints: {"type":"Buffer","data":[1,2,3,4,5]}
+console.log(json);
+
+const copy = JSON.parse(json, (key, value) => {
+ return value && value.type === 'Buffer'
+ ? Buffer.from(value.data)
+ : value;
+});
+
+// Prints: <Buffer 01 02 03 04 05>
+console.log(copy);
+```
+
### buf.toString([encoding[, start[, end]]])
-
-* Returns: {Object}
-
-Returns a JSON representation of `buf`. [`JSON.stringify()`] implicitly calls
-this function when stringifying a `Buffer` instance.
-
-Example:
-
-```js
-const buf = Buffer.from([0x1, 0x2, 0x3, 0x4, 0x5]);
-const json = JSON.stringify(buf);
-
-// Prints: {"type":"Buffer","data":[1,2,3,4,5]}
-console.log(json);
-
-const copy = JSON.parse(json, (key, value) => {
- return value && value.type === 'Buffer'
- ? Buffer.from(value.data)
- : value;
-});
-
-// Prints:
-console.log(copy);
-```
-
### buf.values()
* `modulePath` {String} The module to run in the child
@@ -302,6 +306,13 @@ not clone the current process.*
### child_process.spawn(command[, args][, options])
* `command` {String} The command to run
@@ -485,6 +496,10 @@ child.unref();
#### options.stdio
The `options.stdio` option is used to configure the pipes that are established
@@ -554,9 +569,9 @@ spawn('prg', [], { stdio: ['pipe', null, null, null, 'pipe'] });
*It is worth noting that when an IPC channel is established between the
parent and child processes, and the child is a Node.js process, the child
is launched with the IPC channel unreferenced (using `unref()`) until the
-child registers an event handler for the [`process.on('disconnect')`][] event.
-This allows the child to exit normally without the process being held open
-by the open IPC channel.*
+child registers an event handler for the [`process.on('disconnect')`][] event
+or the [`process.on('message')`][] event. This allows the child to exit
+normally without the process being held open by the open IPC channel.*
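+
+A minimal sketch of this behavior (the file names are hypothetical): once the
+child registers a `'message'` handler, the IPC channel keeps it alive until
+it disconnects:
+
+```js
+// parent.js
+const { fork } = require('child_process');
+const child = fork('child.js');
+child.send('ping');
+
+// child.js: registering a 'message' handler references the IPC channel,
+// so the child stays alive until it disconnects or exits.
+process.on('message', (msg) => {
+  console.log(`received: ${msg}`);
+  process.disconnect();
+});
+```
+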
See also: [`child_process.exec()`][] and [`child_process.fork()`][]
@@ -574,6 +589,10 @@ configuration at startup.
### child_process.execFileSync(file[, args][, options])
* `file` {String} The name or path of the executable file to run
@@ -660,6 +679,13 @@ execution.**
### child_process.spawnSync(command[, args][, options])
* `command` {String} The command to run
@@ -911,6 +937,17 @@ grep.stdin.end();
### child.send(message[, sendHandle[, options]][, callback])
* `message` {Object}
diff --git a/doc/api/cli.md b/doc/api/cli.md
index 809bd326a6273c..538b1e06afdb12 100644
--- a/doc/api/cli.md
+++ b/doc/api/cli.md
@@ -43,6 +43,10 @@ The output of this option is less detailed than this document.
### `-e`, `--eval "script"`
Evaluate the following argument as JavaScript. The modules which are
@@ -52,6 +56,10 @@ predefined in the REPL can also be used in `script`.
### `-p`, `--print "script"`
Identical to `-e` but prints the result.
@@ -59,7 +67,9 @@ Identical to `-e` but prints the result.
### `-c`, `--check`
Syntax check the script without executing.
@@ -129,6 +139,20 @@ added: v2.1.0
Prints a stack trace whenever synchronous I/O is detected after the first turn
of the event loop.
+### `--trace-events-enabled`
+
+
+Enables the collection of trace event tracing information.
+
+### `--trace-event-categories`
+
+
+A comma-separated list of categories that should be traced when trace event
+tracing is enabled using `--trace-events-enabled`.
### `--zero-fill-buffers`
+
+Load an OpenSSL configuration file on startup. Among other uses, this can be
+used to enable FIPS-compliant crypto if Node.js is built with `./configure
+--openssl-fips`.
+
+If the [`--openssl-config`][] command line option is used, the environment
+variable is ignored.
+
### `SSL_CERT_DIR=dir`
+
If `--use-openssl-ca` is enabled, this overrides and sets OpenSSL's directory
containing trusted certificates.
@@ -373,6 +412,9 @@ evironment variable will be inherited by any child processes, and if they use
OpenSSL, it may cause them to trust the same CAs as node.
### `SSL_CERT_FILE=file`
+
If `--use-openssl-ca` is enabled, this overrides and sets OpenSSL's file
containing trusted certificates.
@@ -386,3 +428,4 @@ OpenSSL, it may cause them to trust the same CAs as node.
[debugger]: debugger.html
[REPL]: repl.html
[SlowBuffer]: buffer.html#buffer_class_slowbuffer
+[`--openssl-config`]: #cli_openssl_config_file
diff --git a/doc/api/cluster.md b/doc/api/cluster.md
index 01a2aaf6a8cc86..d0ec3f4ed5d277 100644
--- a/doc/api/cluster.md
+++ b/doc/api/cluster.md
@@ -260,6 +260,10 @@ It is not emitted in the worker.
### worker.disconnect()
* Returns: {Worker} A reference to `worker`.
@@ -416,6 +420,10 @@ accidental disconnection.
### worker.send(message[, sendHandle][, callback])
* `message` {Object}
@@ -449,6 +457,10 @@ if (cluster.isMaster) {
> Stability: 0 - Deprecated: Use [`worker.exitedAfterDisconnect`][] instead.
@@ -579,6 +591,13 @@ The `addressType` is one of:
* `"udp4"` or `"udp6"` (UDP v4 or v6)
## Event: 'message'
+
* `worker` {cluster.Worker}
* `message` {Object}
@@ -708,6 +727,10 @@ values are `"rr"` and `"none"`.
## cluster.settings
* {Object}
@@ -732,6 +755,10 @@ This object is not supposed to be changed or set manually, by you.
## cluster.setupMaster([settings])
* `settings` {Object}
diff --git a/doc/api/console.md b/doc/api/console.md
index 7315ca5192be64..45566a9cdf586d 100644
--- a/doc/api/console.md
+++ b/doc/api/console.md
@@ -9,10 +9,15 @@ The module exports two specific components:
* A `Console` class with methods such as `console.log()`, `console.error()` and
`console.warn()` that can be used to write to any Node.js stream.
-* A global `console` instance configured to write to `stdout` and `stderr`.
- Because this object is global, it can be used without calling
+* A global `console` instance configured to write to [`process.stdout`][] and
+ [`process.stderr`][]. The global `console` can be used without calling
`require('console')`.
+***Warning***: The global console object's methods are neither consistently
+synchronous like the browser APIs they resemble, nor are they consistently
+asynchronous like all other Node.js streams. See the [note on process I/O][] for
+more information.
+
Example using the global `console`:
```js
@@ -47,21 +52,6 @@ myConsole.warn(`Danger ${name}! Danger!`);
// Prints: Danger Will Robinson! Danger!, to err
```
-While the API for the `Console` class is designed fundamentally around the
-browser `console` object, the `Console` in Node.js is *not* intended to
-duplicate the browser's functionality exactly.
-
-## Asynchronous vs Synchronous Consoles
-
-The console functions are usually asynchronous unless the destination is a file.
-Disks are fast and operating systems normally employ write-back caching;
-it should be a very rare occurrence indeed that a write blocks, but it
-is possible.
-
-Additionally, console functions are blocking when outputting to TTYs
-(terminals) on OS X as a workaround for the OS's very small, 1kb buffer size.
-This is to prevent interleaving between `stdout` and `stderr`.
-
## Class: Console
@@ -246,6 +236,11 @@ milliseconds to `stdout`. Timer durations are accurate to the sub-millisecond.
### console.timeEnd(label)
Stops a timer that was previously started by calling [`console.time()`][] and
@@ -305,4 +300,5 @@ The `console.warn()` function is an alias for [`console.error()`][].
[`util.format()`]: util.html#util_util_format_format_args
[`util.inspect()`]: util.html#util_util_inspect_object_options
[customizing `util.inspect()` colors]: util.html#util_customizing_util_inspect_colors
+[note on process I/O]: process.html#process_a_note_on_process_i_o
[web-api-assert]: https://developer.mozilla.org/en-US/docs/Web/API/console/assert
diff --git a/doc/api/crypto.md b/doc/api/crypto.md
index 9102f2e89c03c4..d14c787a2b3c60 100644
--- a/doc/api/crypto.md
+++ b/doc/api/crypto.md
@@ -229,6 +229,10 @@ Returns `this` for method chaining.
### cipher.update(data[, input_encoding][, output_encoding])
Updates the cipher with `data`. If the `input_encoding` argument is given,
@@ -327,6 +331,10 @@ than once will result in an error being thrown.
### decipher.setAAD(buffer)
When using an authenticated encryption mode (only `GCM` is currently
@@ -338,6 +346,10 @@ Returns `this` for method chaining.
### decipher.setAuthTag(buffer)
When using an authenticated encryption mode (only `GCM` is currently
@@ -368,6 +380,10 @@ Returns `this` for method chaining.
### decipher.update(data[, input_encoding][, output_encoding])
Updates the decipher with `data`. If the `input_encoding` argument is given,
@@ -548,6 +564,10 @@ assert.strictEqual(aliceSecret.toString('hex'), bobSecret.toString('hex'));
### ecdh.computeSecret(other_public_key[, input_encoding][, output_encoding])
Computes the shared secret using `other_public_key` as the other
@@ -731,6 +751,10 @@ called. Multiple calls will cause an error to be thrown.
### hash.update(data[, input_encoding])
Updates the hash content with the given `data`, the encoding of which
@@ -813,6 +837,10 @@ called. Multiple calls to `hmac.digest()` will result in an error being thrown.
### hmac.update(data[, input_encoding])
Updates the `Hmac` content with the given `data`, the encoding of which
@@ -914,6 +942,10 @@ called. Multiple calls to `sign.sign()` will result in an error being thrown.
### sign.update(data[, input_encoding])
Updates the `Sign` content with the given `data`, the encoding of which
@@ -972,6 +1004,10 @@ console.log(verify.verify(publicKey, signature));
### verifier.update(data[, input_encoding])
Updates the `Verify` content with the given `data`, the encoding of which
@@ -1133,6 +1169,11 @@ The `key` is the raw key used by the `algorithm` and `iv` is an
### crypto.createDiffieHellman(prime[, prime_encoding][, generator][, generator_encoding])
Creates a `DiffieHellman` key exchange object using the supplied `prime` and an
@@ -1334,6 +1375,15 @@ console.log(hashes); // ['DSA', 'DSA-SHA', 'DSA-SHA1', ...]
### crypto.pbkdf2(password, salt, iterations, keylen, digest, callback)
Provides an asynchronous Password-Based Key Derivation Function 2 (PBKDF2)
@@ -1369,6 +1419,15 @@ An array of supported digest functions can be retrieved using
### crypto.pbkdf2Sync(password, salt, iterations, keylen, digest)
Provides a synchronous Password-Based Key Derivation Function 2 (PBKDF2)
@@ -1419,22 +1478,6 @@ keys:
All paddings are defined in `crypto.constants`.
-### crypto.timingSafeEqual(a, b)
-
-
-Returns true if `a` is equal to `b`, without leaking timing information that
-would allow an attacker to guess one of the values. This is suitable for
-comparing HMAC digests or secret values like authentication cookies or
-[capability urls](https://www.w3.org/TR/capability-urls/).
-
-`a` and `b` must both be `Buffer`s, and they must have the same length.
-
-**Note**: Use of `crypto.timingSafeEqual` does not guarantee that the
-*surrounding* code is timing-safe. Care should be taken to ensure that the
-surrounding code does not introduce timing vulnerabilities.
-
### crypto.privateEncrypt(private_key, buffer)
+
+Returns true if `a` is equal to `b`, without leaking timing information that
+would allow an attacker to guess one of the values. This is suitable for
+comparing HMAC digests or secret values like authentication cookies or
+[capability urls](https://www.w3.org/TR/capability-urls/).
+
+`a` and `b` must both be `Buffer`s, and they must have the same length.
+
+**Note**: Use of `crypto.timingSafeEqual` does not guarantee that the
+*surrounding* code is timing-safe. Care should be taken to ensure that the
+surrounding code does not introduce timing vulnerabilities.
+
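+For example, a constant-time comparison of two HMAC digests (a minimal
+sketch; the secret and payload values are placeholders):
+
+```js
+const crypto = require('crypto');
+
+const key = 'a shared secret';
+const expected = crypto.createHmac('sha256', key).update('payload').digest();
+const received = crypto.createHmac('sha256', key).update('payload').digest();
+
+// Both arguments are Buffers of the same length, as required.
+console.log(crypto.timingSafeEqual(expected, received));
+// Prints: true
+```
+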
## Notes
### Legacy Streams API (pre Node.js v0.10)
diff --git a/doc/api/debugger.md b/doc/api/debugger.md
index 7112b403fe35f3..b9503ca8d7b72a 100644
--- a/doc/api/debugger.md
+++ b/doc/api/debugger.md
@@ -190,8 +190,8 @@ V8 Inspector can be enabled by passing the `--inspect` flag when starting a
Node.js application. It is also possible to supply a custom port with that flag,
e.g. `--inspect=9222` will accept DevTools connections on port 9222.
-To break on the first line of the application code, provide the `--debug-brk`
-flag in addition to `--inspect`.
+To break on the first line of the application code, pass the `--inspect-brk`
+flag instead of `--inspect`.
```txt
$ node --inspect index.js
@@ -201,4 +201,4 @@ To start debugging, open the following URL in Chrome:
chrome-devtools://devtools/remote/serve_file/@60cd6e859b9f557d2312f5bf532f6aec5f284980/inspector.html?experiments=true&v8only=true&ws=localhost:9229/node
```
-[TCP-based protocol]: https://github.com/v8/v8/wiki/Debugging-Protocol
+[TCP-based protocol]: https://github.com/v8/v8/wiki/Debugging-Protocol
\ No newline at end of file
diff --git a/doc/api/deprecations.md b/doc/api/deprecations.md
new file mode 100644
index 00000000000000..e8a71996c78c0a
--- /dev/null
+++ b/doc/api/deprecations.md
@@ -0,0 +1,40 @@
+# Deprecated APIs
+
+Node.js may deprecate APIs when either: (a) use of the API is considered to be
+unsafe, (b) an improved alternative API has been made available, or (c)
+breaking changes to the API are expected in a future major release.
+
+Node.js utilizes three kinds of Deprecations:
+
+* Documentation-only
+* Runtime
+* End-of-Life
+
+A Documentation-only deprecation is one that is expressed only within the
+Node.js API docs. These generate no side-effects while running Node.js.
+
+A Runtime deprecation will, by default, generate a process warning that will
+be printed to `stderr` the first time the deprecated API is used. When the
+`--throw-deprecation` command-line flag is used, a Runtime deprecation will
+cause an error to be thrown.
+
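+Runtime deprecation warnings can also be observed programmatically through
+the process `'warning'` event (a minimal sketch; deprecation warnings carry
+the name `DeprecationWarning`):
+
+```js
+process.on('warning', (warning) => {
+  if (warning.name === 'DeprecationWarning') {
+    console.error(`Deprecated API used: ${warning.message}`);
+  }
+});
+```
+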
+An End-of-Life deprecation is used to identify code that either has been
+removed or will soon be removed from Node.js.
+
+## Un-deprecation
+
+From time to time the deprecation of an API may be reversed. Such action may
+happen in either a semver-minor or semver-major release. In such situations,
+this document will be updated with information relevant to the decision.
+*However, the deprecation identifier will not be modified*.
+
+## List of Deprecated APIs
+
+
+### DEP0062: node --debug
+
+Type: Runtime
+
+`--debug` activates the legacy V8 debugger interface, which has been removed as
+of V8 5.8. It is replaced by Inspector, which is activated with `--inspect`
+instead.
diff --git a/doc/api/dgram.md b/doc/api/dgram.md
index 2e214beb2ff7d2..b763620f2c58e2 100644
--- a/doc/api/dgram.md
+++ b/doc/api/dgram.md
@@ -225,9 +225,34 @@ never have reason to call this.
If `multicastInterface` is not specified, the operating system will attempt to
drop membership on all valid interfaces.
+### socket.ref()
+
+
+By default, binding a socket will cause it to block the Node.js process from
+exiting as long as the socket is open. The `socket.unref()` method can be used
+to exclude the socket from the reference counting that keeps the Node.js
+process active. The `socket.ref()` method adds the socket back to the reference
+counting and restores the default behavior.
+
+Calling `socket.ref()` multiple times will have no additional effect.
+
+The `socket.ref()` method returns a reference to the socket so calls can be
+chained.
+
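+A minimal sketch of the `socket.unref()` / `socket.ref()` pairing (the port
+number is arbitrary):
+
+```js
+const dgram = require('dgram');
+const socket = dgram.createSocket('udp4');
+
+socket.bind(41234);
+
+// Allow the process to exit even while the socket remains bound...
+socket.unref();
+
+// ...then opt back in to the default reference-counting behavior.
+socket.ref();
+```
+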
### socket.send(msg, [offset, length,] port, address[, callback])
* `msg` {Buffer|String|Array} Message to be sent
@@ -377,22 +402,6 @@ Changing TTL values is typically done for network probes or when multicasting.
The argument to `socket.setTTL()` is a number of hops between 1 and 255.
The default on most systems is 64 but can vary.
-### socket.ref()
-
-
-By default, binding a socket will cause it to block the Node.js process from
-exiting as long as the socket is open. The `socket.unref()` method can be used
-to exclude the socket from the reference counting that keeps the Node.js
-process active. The `socket.ref()` method adds the socket back to the reference
-counting and restores the default behavior.
-
-Calling `socket.ref()` multiples times will have no additional effect.
-
-The `socket.ref()` method returns a reference to the socket so calls can be
-chained.
-
### socket.unref()
Resolves a hostname (e.g. `'nodejs.org'`) into the first found A (IPv4) or
@@ -84,15 +89,7 @@ Alternatively, `options` can be an object containing these properties:
* `all`: {Boolean} - When `true`, the callback returns all resolved addresses
in an array, otherwise returns a single address. Defaults to `false`.
-All properties are optional. An example usage of options is shown below.
-
-```js
-{
- family: 4,
- hints: dns.ADDRCONFIG | dns.V4MAPPED,
- all: false
-}
-```
+All properties are optional.
The `callback` function has arguments `(err, address, family)`. `address` is a
string representation of an IPv4 or IPv6 address. `family` is either the
@@ -115,6 +112,25 @@ important consequences on the behavior of any Node.js program. Please take some
time to consult the [Implementation considerations section][] before using
`dns.lookup()`.
+Example usage:
+
+```js
+const dns = require('dns');
+const options = {
+ family: 6,
+ hints: dns.ADDRCONFIG | dns.V4MAPPED,
+};
+dns.lookup('example.com', options, (err, address, family) =>
+ console.log('address: %j family: IPv%s', address, family));
+// address: "2606:2800:220:1:248:1893:25c8:1946" family: IPv6
+
+// When options.all is true, the result will be an Array.
+options.all = true;
+dns.lookup('example.com', options, (err, addresses) =>
+ console.log('addresses: %j', addresses));
+// addresses: [{"address":"2606:2800:220:1:248:1893:25c8:1946","family":6}]
+```
+
### Supported getaddrinfo flags
The following flags can be passed as hints to [`dns.lookup()`][].
@@ -186,6 +202,11 @@ one of the error codes listed [here](#dns_error_codes).
## dns.resolve4(hostname[, options], callback)
Uses the DNS protocol to resolve a IPv4 addresses (`A` records) for the
@@ -203,6 +224,11 @@ will contain an array of IPv4 addresses (e.g.
## dns.resolve6(hostname[, options], callback)
Uses the DNS protocol to resolve a IPv6 addresses (`AAAA` records) for the
@@ -276,6 +302,15 @@ Uses the DNS protocol to resolve name server records (`NS` records) for the
contain an array of name server records available for `hostname`
(e.g. `['ns1.example.com', 'ns2.example.com']`).
+## dns.resolvePtr(hostname, callback)
+
+
+Uses the DNS protocol to resolve pointer records (`PTR` records) for the
+`hostname`. The `addresses` argument passed to the `callback` function will
+be an array of strings containing the reply records.
+
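+For example (a minimal sketch; the reverse-lookup name is illustrative only):
+
+```js
+const dns = require('dns');
+
+dns.resolvePtr('8.8.8.8.in-addr.arpa', (err, addresses) => {
+  if (err) throw err;
+  console.log('PTR records: %j', addresses);
+});
+```
+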
## dns.resolveSoa(hostname, callback)
-
-Uses the DNS protocol to resolve pointer records (`PTR` records) for the
-`hostname`. The `addresses` argument passed to the `callback` function will
-be an array of strings containing the reply records.
-
## dns.resolveTxt(hostname, callback)
* `eventName` {String|Symbol} The event name
@@ -347,6 +352,11 @@ Returns the number of listeners listening to the event named `eventName`.
### emitter.listeners(eventName)
Returns a copy of the array of listeners for the event named `eventName`.
diff --git a/doc/api/fs.md b/doc/api/fs.md
index 187c7d9edc1a12..6f1fceae84f426 100644
--- a/doc/api/fs.md
+++ b/doc/api/fs.md
@@ -164,22 +164,22 @@ added: v0.1.93
`ReadStream` is a [Readable Stream][].
-### Event: 'open'
+### Event: 'close'
-* `fd` {Integer} Integer file descriptor used by the ReadStream.
-
-Emitted when the ReadStream's file is opened.
+Emitted when the `ReadStream`'s underlying file descriptor has been closed
+using the `fs.close()` method.
-### Event: 'close'
+### Event: 'open'
-Emitted when the `ReadStream`'s underlying file descriptor has been closed
-using the `fs.close()` method.
+* `fd` {Integer} Integer file descriptor used by the ReadStream.
+
+Emitted when the ReadStream's file is opened.
### readStream.bytesRead
-* `fd` {Integer} Integer file descriptor used by the WriteStream.
-
-Emitted when the WriteStream's file is opened.
+Emitted when the `WriteStream`'s underlying file descriptor has been closed
+using the `fs.close()` method.
-### Event: 'close'
+### Event: 'open'
-Emitted when the `WriteStream`'s underlying file descriptor has been closed
-using the `fs.close()` method.
+* `fd` {Integer} Integer file descriptor used by the WriteStream.
+
+Emitted when the WriteStream's file is opened.
### writeStream.bytesWritten
* `file` {String | Buffer | Number} filename or file descriptor
@@ -484,6 +495,13 @@ automatically._
## fs.appendFileSync(file, data[, options])
* `file` {String | Buffer | Number} filename or file descriptor
@@ -498,6 +516,11 @@ The synchronous version of [`fs.appendFile()`][]. Returns `undefined`.
## fs.chmod(path, mode, callback)
* `path` {String | Buffer}
@@ -520,6 +543,11 @@ Synchronous chmod(2). Returns `undefined`.
## fs.chown(path, uid, gid, callback)
* `path` {String | Buffer}
@@ -544,6 +572,11 @@ Synchronous chown(2). Returns `undefined`.
## fs.close(fd, callback)
* `fd` {Integer}
@@ -570,6 +603,13 @@ operations. The specific constants currently defined are described in
## fs.createReadStream(path[, options])
* `path` {String | Buffer}
@@ -631,6 +671,16 @@ If `options` is a string, then it specifies the encoding.
## fs.createWriteStream(path[, options])
* `path` {String | Buffer}
@@ -797,6 +847,11 @@ a callback.)
## fs.fchmod(fd, mode, callback)
* `fd` {Integer}
@@ -819,6 +874,11 @@ Synchronous fchmod(2). Returns `undefined`.
## fs.fchown(fd, uid, gid, callback)
* `fd` {Integer}
@@ -843,6 +903,11 @@ Synchronous fchown(2). Returns `undefined`.
## fs.fdatasync(fd, callback)
* `fd` {Integer}
@@ -863,6 +928,11 @@ Synchronous fdatasync(2). Returns `undefined`.
## fs.fstat(fd, callback)
* `fd` {Integer}
@@ -884,6 +954,11 @@ Synchronous fstat(2). Returns an instance of [`fs.Stats`][].
## fs.fsync(fd, callback)
* `fd` {Integer}
@@ -904,6 +979,11 @@ Synchronous fsync(2). Returns `undefined`.
## fs.ftruncate(fd, len, callback)
* `fd` {Integer}
@@ -967,6 +1047,15 @@ Synchronous ftruncate(2). Returns `undefined`.
## fs.futimes(fd, atime, mtime, callback)
* `fd` {Integer}
@@ -980,6 +1069,11 @@ descriptor.
## fs.futimesSync(fd, atime, mtime)
* `fd` {Integer}
@@ -991,6 +1085,11 @@ Synchronous version of [`fs.futimes()`][]. Returns `undefined`.
## fs.lchmod(path, mode, callback)
* `path` {String | Buffer}
@@ -1015,6 +1114,11 @@ Synchronous lchmod(2). Returns `undefined`.
## fs.lchown(path, uid, gid, callback)
* `path` {String | Buffer}
@@ -1039,6 +1143,11 @@ Synchronous lchown(2). Returns `undefined`.
## fs.link(existingPath, newPath, callback)
* `existingPath` {String | Buffer}
@@ -1061,6 +1170,11 @@ Synchronous link(2). Returns `undefined`.
## fs.lstat(path, callback)
* `path` {String | Buffer}
@@ -1083,6 +1197,11 @@ Synchronous lstat(2). Returns an instance of [`fs.Stats`][].
## fs.mkdir(path[, mode], callback)
* `path` {String | Buffer}
@@ -1105,6 +1224,14 @@ Synchronous mkdir(2). Returns `undefined`.
## fs.mkdtemp(prefix[, options], callback)
* `prefix` {String}
@@ -1278,6 +1405,13 @@ descriptor.
## fs.read(fd, buffer, offset, length, position, callback)
* `fd` {Integer}
@@ -1303,6 +1437,11 @@ The callback is given the three arguments, `(err, bytesRead, buffer)`.
## fs.readdir(path[, options], callback)
* `path` {String | Buffer}
@@ -1339,6 +1478,18 @@ the filenames returned will be passed as `Buffer` objects.
## fs.readFile(file[, options], callback)
* `file` {String | Buffer | Integer} filename or file descriptor
@@ -1375,6 +1526,10 @@ automatically._
## fs.readFileSync(file[, options])
* `file` {String | Buffer | Integer} filename or file descriptor
@@ -1390,6 +1545,11 @@ string. Otherwise it returns a buffer.
## fs.readlink(path[, options], callback)
* `path` {String | Buffer}
@@ -1424,6 +1584,10 @@ the link path returned will be passed as a `Buffer` object.
## fs.readSync(fd, buffer, offset, length, position)
* `fd` {Integer}
@@ -1437,6 +1601,18 @@ Synchronous version of [`fs.read()`][]. Returns the number of `bytesRead`.
## fs.realpath(path[, options], callback)
* `path` {String | Buffer}
@@ -1457,6 +1633,14 @@ the path returned will be passed as a `Buffer` object.
## fs.realpathSync(path[, options])
* `path` {String | Buffer};
@@ -1475,6 +1659,11 @@ will be passed as a `Buffer` object.
## fs.rename(oldPath, newPath, callback)
* `oldPath` {String | Buffer}
@@ -1497,6 +1686,11 @@ Synchronous rename(2). Returns `undefined`.
## fs.rmdir(path, callback)
* `path` {String | Buffer}
@@ -1517,6 +1711,11 @@ Synchronous rmdir(2). Returns `undefined`.
## fs.stat(path, callback)
* `path` {String | Buffer}
@@ -1583,6 +1782,11 @@ Synchronous symlink(2). Returns `undefined`.
## fs.truncate(path, len, callback)
* `path` {String | Buffer}
@@ -1607,6 +1811,11 @@ passed as the first argument. In this case, `fs.ftruncateSync()` is called.
## fs.unlink(path, callback)
* `path` {String | Buffer}
@@ -1646,6 +1855,15 @@ when possible._
## fs.utimes(path, atime, mtime, callback)
* `path` {String | Buffer}
@@ -1668,6 +1886,11 @@ follow these rules:
## fs.utimesSync(path, atime, mtime)
* `path` {String | Buffer}
@@ -1679,6 +1902,10 @@ Synchronous version of [`fs.utimes()`][]. Returns `undefined`.
## fs.watch(filename[, options][, listener])
* `filename` {String | Buffer}
@@ -1753,10 +1980,10 @@ watching the *original* inode. Events for the new inode will not be emitted.
This is expected behavior.
In AIX, save and close of a file being watched causes two notifications -
-one for adding new content, and one for truncation. Moreover, save and
-close operations on some platforms cause inode changes that force watch
-operations to become invalid and ineffective. AIX retains inode for the
-lifetime of a file, that way though this is different from Linux / OS X,
+one for adding new content, and one for truncation. Moreover, save and
+close operations on some platforms cause inode changes that force watch
+operations to become invalid and ineffective. AIX retains inode for the
+lifetime of a file, that way though this is different from Linux / OS X,
this improves the usability of file watching. This is expected behavior.
#### Filename Argument
@@ -1828,6 +2055,17 @@ _Note: [`fs.watch()`][] is more efficient than `fs.watchFile` and
## fs.write(fd, buffer[, offset[, length[, position]]], callback)
* `fd` {Integer}
@@ -1860,6 +2098,14 @@ the end of the file.
## fs.write(fd, string[, position[, encoding]], callback)
* `fd` {Integer}
@@ -1896,6 +2142,17 @@ the end of the file.
## fs.writeFile(file, data[, options], callback)
* `file` {String | Buffer | Integer} filename or file descriptor
@@ -1939,6 +2196,13 @@ automatically._
## fs.writeFileSync(file, data[, options])
* `file` {String | Buffer | Integer} filename or file descriptor
@@ -1953,6 +2217,13 @@ The synchronous version of [`fs.writeFile()`][]. Returns `undefined`.
## fs.writeSync(fd, buffer[, offset[, length[, position]]])
* `fd` {Integer}
@@ -1964,6 +2235,10 @@ added: v0.1.21
## fs.writeSync(fd, string[, position[, encoding]])
* `fd` {Integer}
@@ -2231,3 +2506,4 @@ The following constants are meant for use with the [`fs.Stats`][] object's
[`ReadDirectoryChangesW`]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365465%28v=vs.85%29.aspx
[`AHAFS`]: https://www.ibm.com/developerworks/aix/library/au-aix_event_infrastructure/
[Common System Errors]: errors.html#errors_common_system_errors
+[`Uint8Array`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Uint8Array
diff --git a/doc/api/http.md b/doc/api/http.md
index 574bfe038b01b8..1b766c3eea97f4 100644
--- a/doc/api/http.md
+++ b/doc/api/http.md
@@ -450,6 +450,14 @@ added: v0.3.8
Marks the request as aborting. Calling this will cause remaining data
in the response to be dropped and the socket to be destroyed.
+### request.aborted
+
+
+If a request has been aborted, this value is the time when the request was
+aborted, in milliseconds since 1 January 1970 00:00:00 UTC.
+
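+A minimal sketch (the URL is a placeholder):
+
+```js
+const http = require('http');
+
+const req = http.get('http://example.com/', (res) => {
+  res.resume();
+});
+
+req.abort();
+
+// Once aborted, the property holds a millisecond timestamp.
+console.log(req.aborted);
+```
+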
### request.end([data][, encoding][, callback])
* `exception` {Error}
@@ -745,8 +759,14 @@ added: v0.1.90
* `callback` {Function}
Begin accepting connections on the specified `port` and `hostname`. If the
-`hostname` is omitted, the server will accept connections on any IPv6 address
-(`::`) when IPv6 is available, or any IPv4 address (`0.0.0.0`) otherwise.
+`hostname` is omitted, the server will accept connections on the
+[unspecified IPv6 address][] (`::`) when IPv6 is available, or the
+[unspecified IPv4 address][] (`0.0.0.0`) otherwise.
+
+*Note*: in most operating systems, listening to the
+[unspecified IPv6 address][] (`::`) may cause the `net.Server` to also listen on
+the [unspecified IPv4 address][] (`0.0.0.0`).
+
Omit the port argument, or use a port value of `0`, to have the operating system
assign a random port, which can be retrieved by using `server.address().port`
after the `'listening'` event has been emitted.
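+
+For example, a minimal sketch of binding to an OS-assigned port:
+
+```js
+const http = require('http');
+
+const server = http.createServer((req, res) => {
+  res.end('ok');
+});
+
+// Passing 0 lets the operating system pick an available port.
+server.listen(0, () => {
+  console.log(`Listening on port ${server.address().port}`);
+});
+```
+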
@@ -928,6 +948,66 @@ Example:
var contentType = response.getHeader('content-type');
```
+### response.getHeaderNames()
+
+
+* Returns: {Array}
+
+Returns an array containing the unique names of the current outgoing headers.
+All header names are lowercase.
+
+Example:
+
+```js
+response.setHeader('Foo', 'bar');
+response.setHeader('Set-Cookie', ['foo=bar', 'bar=baz']);
+
+var headerNames = response.getHeaderNames();
+// headerNames === ['foo', 'set-cookie']
+```
+
+### response.getHeaders()
+
+
+* Returns: {Object}
+
+Returns a shallow copy of the current outgoing headers. Since a shallow copy
+is used, array values may be mutated without additional calls to various
+header-related http module methods. The keys of the returned object are the
+header names and the values are the respective header values. All header names
+are lowercase.
+
+Example:
+
+```js
+response.setHeader('Foo', 'bar');
+response.setHeader('Set-Cookie', ['foo=bar', 'bar=baz']);
+
+var headers = response.getHeaders();
+// headers === { foo: 'bar', 'set-cookie': ['foo=bar', 'bar=baz'] }
+```
+
+### response.hasHeader(name)
+
+
+* `name` {String}
+* Returns: {Boolean}
+
+Returns `true` if the header identified by `name` is currently set in the
+outgoing headers. Note that the header name matching is case-insensitive.
+
+Example:
+
+```js
+var hasContentType = response.hasHeader('content-type');
+```
+
### response.headersSent
* `statusCode` {Number}
@@ -1310,6 +1395,18 @@ Calls `message.connection.setTimeout(msecs, callback)`.
Returns `message`.
+### message.socket
+
+
+* {net.Socket}
+
+The [`net.Socket`][] object associated with the connection.
+
+With HTTPS support, use [`request.socket.getPeerCertificate()`][] to obtain the
+client's authentication details.
+
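+A minimal sketch of an HTTPS server reading the client certificate (the key
+and certificate paths, and the port, are placeholders):
+
+```js
+const https = require('https');
+const fs = require('fs');
+
+const options = {
+  key: fs.readFileSync('server-key.pem'),    // placeholder path
+  cert: fs.readFileSync('server-cert.pem'),  // placeholder path
+  requestCert: true
+};
+
+https.createServer(options, (req, res) => {
+  // `req.socket` is the TLS socket for this connection.
+  const cert = req.socket.getPeerCertificate();
+  res.end(cert.subject ? `Hello ${cert.subject.CN}\n` : 'Hello anonymous\n');
+}).listen(8443);
+```
+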
### message.statusCode
-
-* {net.Socket}
-
-The [`net.Socket`][] object associated with the connection.
-
-With HTTPS support, use [`request.socket.getPeerCertificate()`][] to obtain the
-client's authentication details.
-
### message.trailers
Begin accepting connections on the specified `port` and `hostname`. If the
-`hostname` is omitted, the server will accept connections on any IPv6 address
-(`::`) when IPv6 is available, or any IPv4 address (`0.0.0.0`) otherwise.
+`hostname` is omitted, the server will accept connections on the
+[unspecified IPv6 address][] (`::`) when IPv6 is available, or the
+[unspecified IPv4 address][] (`0.0.0.0`) otherwise.
+
+*Note*: in most operating systems, listening to the
+[unspecified IPv6 address][] (`::`) may cause the `net.Server` to also listen on
+the [unspecified IPv4 address][] (`0.0.0.0`).
+
Omit the port argument, or use a port value of `0`, to have the operating system
assign a random port, which can be retrieved by using `server.address().port`
after the `'listening'` event has been emitted.
@@ -403,6 +411,10 @@ following this event.
### Event: 'lookup'
Emitted after resolving the hostname but before connecting.
@@ -471,6 +483,15 @@ The amount of bytes sent.
### socket.connect(options[, connectListener])
Opens the connection for a given socket.
@@ -936,8 +957,11 @@ Returns true if input is a version 6 IP address, otherwise returns false.
[`resume()`]: #net_socket_resume
[`server.getConnections()`]: #net_server_getconnections_callback
[`server.listen(port, host, backlog, callback)`]: #net_server_listen_port_hostname_backlog_callback
+[`server.close()`]: #net_server_close_callback
[`socket.connect(options, connectListener)`]: #net_socket_connect_options_connectlistener
[`socket.connect`]: #net_socket_connect_options_connectlistener
[`socket.setTimeout()`]: #net_socket_settimeout_timeout_callback
[`stream.setEncoding()`]: stream.html#stream_readable_setencoding_encoding
[Readable Stream]: stream.html#stream_class_stream_readable
+[unspecified IPv6 address]: https://en.wikipedia.org/wiki/IPv6_address#Unspecified_address
+[unspecified IPv4 address]: https://en.wikipedia.org/wiki/0.0.0.0
diff --git a/doc/api/os.md b/doc/api/os.md
index 15b84189c91272..d1a4cc66aed053 100644
--- a/doc/api/os.md
+++ b/doc/api/os.md
@@ -333,6 +333,11 @@ https://en.wikipedia.org/wiki/Uname#Examples for more information.
## os.tmpdir()
* Returns: {String}
@@ -404,6 +409,12 @@ The following constants are exported by `os.constants`. **Note:** Not all
constants will be available on every operating system.
### Signal Constants
+
The following signal constants are exported by `os.constants.signals`:
diff --git a/doc/api/path.md b/doc/api/path.md
index df65616b6c4e94..f284c500595571 100644
--- a/doc/api/path.md
+++ b/doc/api/path.md
@@ -57,6 +57,10 @@ path.posix.basename('/tmp/myfile.html');
## path.basename(path[, ext])
* `path` {String}
@@ -114,6 +118,10 @@ process.env.PATH.split(path.delimiter)
## path.dirname(path)
* `path` {String}
@@ -134,6 +142,10 @@ A [`TypeError`][] is thrown if `path` is not a string.
## path.extname(path)
* `path` {String}
@@ -408,6 +420,11 @@ of the `path` methods.
## path.relative(from, to)
* `from` {String}
diff --git a/doc/api/process.md b/doc/api/process.md
index 2a413c98c1584e..8e7f75232ec61a 100644
--- a/doc/api/process.md
+++ b/doc/api/process.md
@@ -203,6 +203,14 @@ to detect application failures and recover or restart as needed.
### Event: 'unhandledRejection'
The `'unhandledRejection`' event is emitted whenever a `Promise` is rejected and
@@ -629,83 +637,6 @@ process's [`ChildProcess.disconnect()`][].
If the Node.js process was not spawned with an IPC channel,
`process.disconnect()` will be `undefined`.
-## process.env
-
-
-* {Object}
-
-The `process.env` property returns an object containing the user environment.
-See environ(7).
-
-An example of this object looks like:
-
-```js
-{
- TERM: 'xterm-256color',
- SHELL: '/usr/local/bin/bash',
- USER: 'maciej',
- PATH: '~/.bin/:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin',
- PWD: '/Users/maciej',
- EDITOR: 'vim',
- SHLVL: '1',
- HOME: '/Users/maciej',
- LOGNAME: 'maciej',
- _: '/usr/local/bin/node'
-}
-```
-
-It is possible to modify this object, but such modifications will not be
-reflected outside the Node.js process. In other words, the following example
-would not work:
-
-```console
-$ node -e 'process.env.foo = "bar"' && echo $foo
-```
-
-While the following will:
-
-```js
-process.env.foo = 'bar';
-console.log(process.env.foo);
-```
-
-Assigning a property on `process.env` will implicitly convert the value
-to a string.
-
-Example:
-
-```js
-process.env.test = null;
-console.log(process.env.test);
-// => 'null'
-process.env.test = undefined;
-console.log(process.env.test);
-// => 'undefined'
-```
-
-Use `delete` to delete a property from `process.env`.
-
-Example:
-
-```js
-process.env.TEST = 1;
-delete process.env.TEST;
-console.log(process.env.TEST);
-// => undefined
-```
-
-On Windows operating systems, environment variables are case-insensitive.
-
-Example:
-
-```js
-process.env.TEST = 1;
-console.log(process.env.test);
-// => 1
-```
-
## process.emitWarning(warning[, name][, ctor])
+
+* {Object}
+
+The `process.env` property returns an object containing the user environment.
+See environ(7).
+
+An example of this object looks like:
+
+```js
+{
+ TERM: 'xterm-256color',
+ SHELL: '/usr/local/bin/bash',
+ USER: 'maciej',
+ PATH: '~/.bin/:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin',
+ PWD: '/Users/maciej',
+ EDITOR: 'vim',
+ SHLVL: '1',
+ HOME: '/Users/maciej',
+ LOGNAME: 'maciej',
+ _: '/usr/local/bin/node'
+}
+```
+
+It is possible to modify this object, but such modifications will not be
+reflected outside the Node.js process. In other words, the following example
+would not work:
+
+```console
+$ node -e 'process.env.foo = "bar"' && echo $foo
+```
+
+While the following will:
+
+```js
+process.env.foo = 'bar';
+console.log(process.env.foo);
+```
+
+Assigning a property on `process.env` will implicitly convert the value
+to a string.
+
+Example:
+
+```js
+process.env.test = null;
+console.log(process.env.test);
+// => 'null'
+process.env.test = undefined;
+console.log(process.env.test);
+// => 'undefined'
+```
+
+Use `delete` to delete a property from `process.env`.
+
+Example:
+
+```js
+process.env.TEST = 1;
+delete process.env.TEST;
+console.log(process.env.TEST);
+// => undefined
+```
+
+On Windows operating systems, environment variables are case-insensitive.
+
+Example:
+
+```js
+process.env.TEST = 1;
+console.log(process.env.test);
+// => 1
+```
+
## process.execArgv
* Returns: {Object}
@@ -1164,6 +1177,10 @@ objects managed by V8.
## process.nextTick(callback[, ...args])
* `callback` {Function}
@@ -1283,6 +1300,10 @@ console.log(`This platform is ${process.platform}`);
## process.release
The `process.release` property returns an Object containing metadata related to
@@ -1475,23 +1496,11 @@ Android)
* {Stream}
-The `process.stderr` property returns a [Writable][] stream equivalent to or
-associated with `stderr` (fd `2`).
-
-Note: `process.stderr` and `process.stdout` differ from other Node.js streams
-in several ways:
-1. They cannot be closed ([`end()`][] will throw).
-2. They never emit the [`'finish'`][] event.
-3. Writes _can_ block when output is redirected to a file.
- - Note that disks are fast and operating systems normally employ write-back
- caching so this is very uncommon.
-4. Writes on UNIX **will** block by default if output is going to a TTY
- (a terminal).
-5. Windows functionality differs. Writes block except when output is going to a
- TTY.
+The `process.stderr` property returns a [Writable][] stream connected to
+`stderr` (fd `2`).
-To check if Node.js is being run in a TTY context, read the `isTTY` property
-on `process.stderr`, `process.stdout`, or `process.stdin`:
+Note: `process.stderr` differs from other Node.js streams in important ways;
+see [note on process I/O][] for more information.
## process.stdin
@@ -1529,40 +1538,52 @@ must call `process.stdin.resume()` to read from it. Note also that calling
* {Stream}
-The `process.stdout` property returns a [Writable][] stream equivalent to or
-associated with `stdout` (fd `1`).
+The `process.stdout` property returns a [Writable][] stream connected to
+`stdout` (fd `1`).
-For example:
+For example, to copy process.stdin to process.stdout:
```js
-console.log = (msg) => {
- process.stdout.write(`${msg}\n`);
-};
+process.stdin.pipe(process.stdout);
```
-Note: `process.stderr` and `process.stdout` differ from other Node.js streams
-in several ways:
-1. They cannot be closed ([`end()`][] will throw).
-2. They never emit the [`'finish'`][] event.
-3. Writes _can_ block when output is redirected to a file.
- - Note that disks are fast and operating systems normally employ write-back
- caching so this is very uncommon.
-4. Writes on UNIX **will** block by default if output is going to a TTY
- (a terminal).
-5. Windows functionality differs. Writes block except when output is going to a
- TTY.
+Note: `process.stdout` differs from other Node.js streams in important ways;
+see [note on process I/O][] for more information.
+
+### A note on process I/O
+
+`process.stdout` and `process.stderr` differ from other Node.js streams in
+important ways:
-To check if Node.js is being run in a TTY context, read the `isTTY` property
-on `process.stderr`, `process.stdout`, or `process.stdin`:
+1. They are used internally by [`console.log()`][] and [`console.error()`][],
+ respectively.
+2. They cannot be closed ([`end()`][] will throw).
+3. They will never emit the [`'finish'`][] event.
+4. Writes may be synchronous depending on what the stream is connected to
+ and whether the system is Windows or Unix:
+ - Files: *synchronous* on Windows and Linux
+ - TTYs (Terminals): *asynchronous* on Windows, *synchronous* on Unix
+ - Pipes (and sockets): *synchronous* on Windows, *asynchronous* on Unix
-### TTY Terminals and `process.stdout`
+These behaviors are partly for historical reasons, as changing them would
+create backwards incompatibility, but they are also expected by some users.
-The `process.stderr` and `process.stdout` streams are blocking when outputting
-to TTYs (terminals) on OS X as a workaround for the operating system's small,
-1kb buffer size. This is to prevent interleaving between `stdout` and `stderr`.
+Synchronous writes avoid problems such as output written with `console.log()` or
+`console.error()` being unexpectedly interleaved, or not written at all if
+`process.exit()` is called before an asynchronous write completes. See
+[`process.exit()`][] for more information.
-To check if Node.js is being run in a [TTY][] context, check the `isTTY`
-property on `process.stderr`, `process.stdout`, or `process.stdin`.
+***Warning***: Synchronous writes block the event loop until the write has
+completed. This can be near instantaneous in the case of output to a file, but
+under high system load, pipes that are not being read at the receiving end, or
+with slow terminals or file systems, it is possible for the event loop to be
+blocked often enough and long enough to have severe negative performance
+impacts. This may not be a problem when writing to an interactive terminal
+session, but be particularly careful when doing production logging to the
+process output streams.
+
+To check if a stream is connected to a [TTY][] context, check the `isTTY`
+property.
For instance:
```console
@@ -1570,7 +1591,6 @@ $ node -p "Boolean(process.stdin.isTTY)"
true
$ echo "foo" | node -p "Boolean(process.stdin.isTTY)"
false
-
$ node -p "Boolean(process.stdout.isTTY)"
true
$ node -p "Boolean(process.stdout.isTTY)" | cat
@@ -1646,6 +1666,10 @@ console.log(`Version: ${process.version}`);
## process.versions
* {Object}
@@ -1724,6 +1748,7 @@ cases:
the high-order bit, and then contain the value of the signal code.
+[`'exit'`]: #process_event_exit
[`'finish'`]: stream.html#stream_event_finish
[`'message'`]: child_process.html#child_process_event_message
[`'rejectionHandled'`]: #process_event_rejectionhandled
@@ -1745,13 +1770,14 @@ cases:
[`promise.catch()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/catch
[`require.main`]: modules.html#modules_accessing_the_main_module
[`setTimeout(fn, 0)`]: timers.html#timers_settimeout_callback_delay_args
+[note on process I/O]: process.html#process_a_note_on_process_i_o
[process_emit_warning]: #process_process_emitwarning_warning_name_ctor
[process_warning]: #process_event_warning
[Signal Events]: #process_signal_events
[Stream compatibility]: stream.html#stream_compatibility_with_older_node_js_versions
[TTY]: tty.html#tty_tty
-[Writable]: stream.html
-[Readable]: stream.html
+[Writable]: stream.html#stream_writable_streams
+[Readable]: stream.html#stream_readable_streams
[Child Process]: child_process.html
[Cluster]: cluster.html
[`process.exitCode`]: #process_process_exitcode
diff --git a/doc/api/punycode.md b/doc/api/punycode.md
index a5d1908a8c40b6..7b0b63939de369 100644
--- a/doc/api/punycode.md
+++ b/doc/api/punycode.md
@@ -1,4 +1,10 @@
# Punycode
+
> Stability: 0 - Deprecated
diff --git a/doc/api/querystring.md b/doc/api/querystring.md
index 443bd4c003501e..0c31c1600bd67c 100644
--- a/doc/api/querystring.md
+++ b/doc/api/querystring.md
@@ -30,6 +30,13 @@ necessary by assigning `querystring.escape` to an alternative function.
## querystring.parse(str[, sep[, eq[, options]]])
* `str` {String} The URL query string to parse
diff --git a/doc/api/readline.md b/doc/api/readline.md
index 32fad5732c70df..10164bc7887984 100644
--- a/doc/api/readline.md
+++ b/doc/api/readline.md
@@ -124,7 +124,7 @@ added: v0.7.5
The `'SIGCONT'` event is emitted when a Node.js process previously moved into
the background using `-Z` (i.e. `SIGTSTP`) is then brought back to the
-foreground using fg(1).
+foreground using fg(1p).
If the `input` stream was paused *before* the `SIGTSTP` request, this event will
not be emitted.
@@ -174,7 +174,7 @@ input, typically known as `SIGTSTP`. If there are no `SIGTSTP` event listeners
registered when the `input` stream receives a `SIGTSTP`, the Node.js process
will be sent to the background.
-When the program is resumed using fg(1), the `'pause'` and `SIGCONT` events
+When the program is resumed using fg(1p), the `'pause'` and `SIGCONT` events
will be emitted. These can be used to resume the `input` stream.
The `'pause'` and `'SIGCONT'` events will not be emitted if the `input` was
@@ -344,6 +344,13 @@ the current position of the cursor down.
## readline.createInterface(options)
* `options` {Object}
@@ -518,8 +525,8 @@ rl.on('line', (line) => {
[`process.stdin`]: process.html#process_process_stdin
[`process.stdout`]: process.html#process_process_stdout
-[Writable]: stream.html
-[Readable]: stream.html
+[Writable]: stream.html#stream_writable_streams
+[Readable]: stream.html#stream_readable_streams
[TTY]: tty.html
[`SIGTSTP`]: readline.html#readline_event_sigtstp
[`SIGCONT`]: readline.html#readline_event_sigcont
diff --git a/doc/api/repl.md b/doc/api/repl.md
index 1b03ba47a25d99..f4d85e11c96b3b 100644
--- a/doc/api/repl.md
+++ b/doc/api/repl.md
@@ -373,6 +373,10 @@ within the action function for commands registered using the
## repl.start([options])
* `options` {Object | String}
diff --git a/doc/api/stream.md b/doc/api/stream.md
index ba1c56398d544c..0e12e76c810c17 100644
--- a/doc/api/stream.md
+++ b/doc/api/stream.md
@@ -380,6 +380,10 @@ file.end('world!');
##### writable.setDefaultEncoding(encoding)
* `encoding` {String} The new default encoding
@@ -429,6 +433,11 @@ See also: [`writable.cork()`][].
##### writable.write(chunk[, encoding][, callback])
* `chunk` {String|Buffer} The data to write
@@ -1069,6 +1078,11 @@ myReader.on('readable', () => {
#### Class: stream.Duplex
@@ -1190,6 +1204,9 @@ the [API for Stream Consumers][] section). Doing so may lead to adverse
side effects in application code consuming the stream.
### Simplified Construction
+
For many simple cases, it is possible to construct a stream without relying on
inheritance. This can be accomplished by directly creating instances of the
diff --git a/doc/api/tls.md b/doc/api/tls.md
index f83ed1df3e449f..94281dd3f00c28 100644
--- a/doc/api/tls.md
+++ b/doc/api/tls.md
@@ -214,19 +214,6 @@ added: v0.3.2
The `tls.Server` class is a subclass of `net.Server` that accepts encrypted
connections using TLS or SSL.
-### Event: 'tlsClientError'
-
-
-The `'tlsClientError'` event is emitted when an error occurs before a secure
-connection is established. The listener callback is passed two arguments when
-called:
-
-* `exception` {Error} The `Error` object describing the error
-* `tlsSocket` {tls.TLSSocket} The `tls.TLSSocket` instance from which the
- error originated.
-
### Event: 'newSession'
+
+The `'tlsClientError'` event is emitted when an error occurs before a secure
+connection is established. The listener callback is passed two arguments when
+called:
+
+* `exception` {Error} The `Error` object describing the error
+* `tlsSocket` {tls.TLSSocket} The `tls.TLSSocket` instance from which the
+ error originated.
+
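+A typical listener simply logs the failure (a minimal sketch, assuming `server`
+is a `tls.Server` instance):
+
+```js
+server.on('tlsClientError', (exception, tlsSocket) => {
+  // The handshake failed before a secure connection was established.
+  console.error('TLS client error:', exception.message);
+});
+```
+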
### server.addContext(hostname, context)
* `socket` {net.Socket} An instance of [`net.Socket`][]
@@ -483,7 +487,12 @@ added: v0.11.4
will be emitted on the socket before establishing a secure communication
* `secureContext`: Optional TLS context object created with
[`tls.createSecureContext()`][]. If a `secureContext` is _not_ provided, one
- will be created by calling [`tls.createSecureContext()`][] with no options.
+ will be created by passing the entire `options` object to
+ `tls.createSecureContext()`. *Note*: In effect, all
+ [`tls.createSecureContext()`][] options can be provided, but they will be
+ _completely ignored_ unless the `secureContext` option is missing.
+ * ...: Optional [`tls.createSecureContext()`][] options can be provided, see
+ the `secureContext` option for more information.
Construct a new `tls.TLSSocket` object from an existing TCP socket.
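+
+As an illustrative sketch of the `secureContext` handling described above (the
+key and certificate paths are placeholders):
+
+```js
+const fs = require('fs');
+const net = require('net');
+const tls = require('tls');
+
+const key = fs.readFileSync('server-key.pem');
+const cert = fs.readFileSync('server-cert.pem');
+
+const server = net.createServer((rawSocket) => {
+  // No `secureContext` is given, so `key` and `cert` are forwarded to
+  // tls.createSecureContext() internally.
+  const tlsSocket = new tls.TLSSocket(rawSocket,
+                                      { isServer: true, key, cert });
+  tlsSocket.pipe(tlsSocket); // simple echo
+});
+server.listen(8000);
+```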
@@ -526,21 +535,21 @@ underlying socket as reported by the operating system. Returns an
object with three properties, e.g.,
`{ port: 12346, family: 'IPv4', address: '127.0.0.1' }`
-### tlsSocket.authorized
+### tlsSocket.authorizationError
-Returns `true` if the peer certificate was signed by one of the CAs specified
-when creating the `tls.TLSSocket` instance, otherwise `false`.
+Returns the reason why the peer's certificate has not been verified. This
+property is set only when `tlsSocket.authorized === false`.
-### tlsSocket.authorizationError
+### tlsSocket.authorized
-Returns the reason why the peer's certificate was not been verified. This
-property is set only when `tlsSocket.authorized === false`.
+Returns `true` if the peer certificate was signed by one of the CAs specified
+when creating the `tls.TLSSocket` instance, otherwise `false`.
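+
+A small sketch combining both properties (assuming `tlsSocket` is an
+established `tls.TLSSocket`):
+
+```js
+function checkPeer(tlsSocket) {
+  if (tlsSocket.authorized) {
+    console.log('Peer certificate was signed by a trusted CA.');
+  } else {
+    console.log('Peer not authorized:', tlsSocket.authorizationError);
+  }
+}
+```
+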
### tlsSocket.encrypted
-
-* `port` {number} Default value for `options.port`.
-* `host` {string} Optional default value for `options.host`.
-* `options` {Object} See [`tls.connect()`][].
-* `callback` {Function} See [`tls.connect()`][].
-
-Same as [`tls.connect()`][] except that `port` and `host` can be provided
-as arguments instead of options.
-
-*Note*: A port or host option, if specified, will take precedence over any port
-or host argument.
-
-## tls.connect(path[, options][, callback])
-
-
-* `path` {string} Default value for `options.path`.
-* `options` {Object} See [`tls.connect()`][].
-* `callback` {Function} See [`tls.connect()`][].
-
-Same as [`tls.connect()`][] except that `path` can be provided
-as an argument instead of an option.
-
-*Note*: A path option, if specified, will take precedence over the path
-argument.
-
## tls.connect(options[, callback])
* `options` {Object}
@@ -879,10 +864,51 @@ socket.on('end', () => {
});
```
+## tls.connect(path[, options][, callback])
+
+
+* `path` {string} Default value for `options.path`.
+* `options` {Object} See [`tls.connect()`][].
+* `callback` {Function} See [`tls.connect()`][].
+
+Same as [`tls.connect()`][] except that `path` can be provided
+as an argument instead of an option.
+
+*Note*: A path option, if specified, will take precedence over the path
+argument.
+
+## tls.connect(port[, host][, options][, callback])
+
+
+* `port` {number} Default value for `options.port`.
+* `host` {string} Optional default value for `options.host`.
+* `options` {Object} See [`tls.connect()`][].
+* `callback` {Function} See [`tls.connect()`][].
+
+Same as [`tls.connect()`][] except that `port` and `host` can be provided
+as arguments instead of options.
+
+*Note*: A port or host option, if specified, will take precedence over any port
+or host argument.
+
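+A combined sketch of the two convenience signatures above (`example.org` and
+the socket path are placeholders):
+
+```js
+const tls = require('tls');
+
+// Port and host arguments act as defaults for options.port / options.host;
+// explicit options take precedence over the arguments.
+const options = { servername: 'example.org' };
+const socket = tls.connect(443, 'example.org', options, () => {
+  console.log('connected, authorized:', socket.authorized);
+  socket.end();
+});
+
+// For IPC connections, a path argument can be used instead:
+// tls.connect('/tmp/tls.sock', options, callback);
+```
+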
## tls.createSecureContext(options)
* `options` {Object}
@@ -972,6 +998,10 @@ publicly trusted list of CAs as given in
## tls.createServer([options][, secureConnectionListener])
* `options` {Object}
@@ -1146,6 +1176,10 @@ certificate used is properly authorized.
> Stability: 0 - Deprecated: Use [`tls.TLSSocket`][] instead.
diff --git a/doc/api/tracing.md b/doc/api/tracing.md
new file mode 100644
index 00000000000000..28e488201ec2d8
--- /dev/null
+++ b/doc/api/tracing.md
@@ -0,0 +1,19 @@
+# Tracing
+
+Trace Event provides a mechanism to centralize tracing information generated by
+V8, Node core, and userspace code.
+
+Tracing can be enabled by passing the `--trace-events-enabled` flag when starting a
+Node.js application.
+
+The set of categories for which traces are recorded can be specified using the
+`--trace-event-categories` flag followed by a list of comma-separated category
+names. By default, the `node` and `v8` categories are enabled.
+
+```txt
+node --trace-events-enabled --trace-event-categories v8,node server.js
+```
+
+Running Node.js with tracing enabled will produce log files that can be opened
+in the [`chrome://tracing`](https://www.chromium.org/developers/how-tos/trace-event-profiling-tool)
+tab of Chrome.
diff --git a/doc/api/url.md b/doc/api/url.md
old mode 100755
new mode 100644
index 50e7433e6110e5..c7bc6d5dbbe2f1
--- a/doc/api/url.md
+++ b/doc/api/url.md
@@ -34,24 +34,22 @@ illustrate each.
(all spaces in the "" line should be ignored -- they are purely for formatting)
```
-### urlObject.href
-
-The `href` property is the full URL string that was parsed with both the
-`protocol` and `host` components converted to lower-case.
-
-For example: `'http://user:pass@host.com:8080/p/a/t/h?query=string#hash'`
+### urlObject.auth
-### urlObject.protocol
+The `auth` property is the username and password portion of the URL, also
+referred to as "userinfo". This string subset follows the `protocol` and
+double slashes (if present) and precedes the `host` component, delimited by an
+ASCII "at sign" (`@`). The format of the string is `{username}[:{password}]`,
+with the `[:{password}]` portion being optional.
-The `protocol` property identifies the URL's lower-cased protocol scheme.
+For example: `'user:pass'`
-For example: `'http:'`
+### urlObject.hash
-### urlObject.slashes
+The `hash` property consists of the "fragment" portion of the URL including
+the leading ASCII hash (`#`) character.
-The `slashes` property is a `boolean` with a value of `true` if two ASCII
-forward-slash characters (`/`) are required following the colon in the
-`protocol`.
+For example: `'#hash'`
### urlObject.host
@@ -60,16 +58,6 @@ the `port` if specified.
For example: `'host.com:8080'`
-### urlObject.auth
-
-The `auth` property is the username and password portion of the URL, also
-referred to as "userinfo". This string subset follows the `protocol` and
-double slashes (if present) and precedes the `host` component, delimited by an
-ASCII "at sign" (`@`). The format of the string is `{username}[:{password}]`,
-with the `[:{password}]` portion being optional.
-
-For example: `'user:pass'`
-
### urlObject.hostname
The `hostname` property is the lower-cased host name portion of the `host`
@@ -77,11 +65,21 @@ component *without* the `port` included.
For example: `'host.com'`
-### urlObject.port
+### urlObject.href
-The `port` property is the numeric port portion of the `host` component.
+The `href` property is the full URL string that was parsed with both the
+`protocol` and `host` components converted to lower-case.
-For example: `'8080'`
+For example: `'http://user:pass@host.com:8080/p/a/t/h?query=string#hash'`
+
+### urlObject.path
+
+The `path` property is a concatenation of the `pathname` and `search`
+components.
+
+For example: `'/p/a/t/h?query=string'`
+
+No decoding of the `path` is performed.
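+
+Putting a few of these properties together (a short sketch using the example
+URL from above):
+
+```js
+const url = require('url');
+const parsed =
+  url.parse('http://user:pass@host.com:8080/p/a/t/h?query=string#hash');
+
+console.log(parsed.auth);
+  // Prints user:pass
+console.log(parsed.hash);
+  // Prints #hash
+console.log(parsed.path);
+  // Prints /p/a/t/h?query=string
+```
+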
### urlObject.pathname
@@ -94,23 +92,17 @@ For example `'/p/a/t/h'`
No decoding of the path string is performed.
-### urlObject.search
-
-The `search` property consists of the entire "query string" portion of the
-URL, including the leading ASCII question mark (`?`) character.
-
-For example: `'?query=string'`
+### urlObject.port
-No decoding of the query string is performed.
+The `port` property is the numeric port portion of the `host` component.
-### urlObject.path
+For example: `'8080'`
-The `path` property is a concatenation of the `pathname` and `search`
-components.
+### urlObject.protocol
-For example: `'/p/a/t/h?query=string'`
+The `protocol` property identifies the URL's lower-cased protocol scheme.
-No decoding of the `path` is performed.
+For example: `'http:'`
### urlObject.query
@@ -124,12 +116,20 @@ For example: `'query=string'` or `{'query': 'string'}`
If returned as a string, no decoding of the query string is performed. If
returned as an object, both keys and values are decoded.
-### urlObject.hash
+### urlObject.search
-The `hash` property consists of the "fragment" portion of the URL including
-the leading ASCII hash (`#`) character.
+The `search` property consists of the entire "query string" portion of the
+URL, including the leading ASCII question mark (`?`) character.
-For example: `'#hash'`
+For example: `'?query=string'`
+
+No decoding of the query string is performed.
+
+### urlObject.slashes
+
+The `slashes` property is a `boolean` with a value of `true` if two ASCII
+forward-slash characters (`/`) are required following the colon in the
+`protocol`.
## url.format(urlObject)
* `from` {String} The Base URL being resolved against.
@@ -270,6 +323,35 @@ console.log(myURL.pathname); // /foo
*Note*: Using the `delete` keyword (e.g. `delete myURL.protocol`,
`delete myURL.pathname`, etc) has no effect but will still return `true`.
+A comparison between this API and `url.parse()` is given below. Above the URL
+`'http://user:pass@host.com:8080/p/a/t/h?query=string#hash'`, properties of an
+object returned by `url.parse()` are shown. Below it are properties of a WHATWG
+`URL` object.
+
+*Note*: WHATWG URL's `origin` property includes `protocol` and `host`, but not
+`username` or `password`.
+
+```txt
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ href │
+├──────────┬──┬─────────────────────┬─────────────────┬───────────────────────────┬───────┤
+│ protocol │ │ auth │ host │ path │ hash │
+│ │ │ ├──────────┬──────┼──────────┬────────────────┤ │
+│ │ │ │ hostname │ port │ pathname │ search │ │
+│ │ │ │ │ │ ├─┬──────────────┤ │
+│ │ │ │ │ │ │ │ query │ │
+" http: // user : pass @ host.com : 8080 /p/a/t/h ? query=string #hash "
+│ │ │ │ │ hostname │ port │ │ │ │
+│ │ │ │ ├──────────┴──────┤ │ │ │
+│ protocol │ │ username │ password │ host │ │ │ │
+├──────────┴──┼──────────┴──────────┼─────────────────┤ │ │ │
+│ origin │ │ origin │ pathname │ search │ hash │
+├─────────────┴─────────────────────┴─────────────────┴──────────┴────────────────┴───────┤
+│ href │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+(all spaces in the "" line should be ignored -- they are purely for formatting)
+```
+
### Class: URL
#### Constructor: new URL(input[, base])
@@ -299,13 +381,15 @@ automatically converted to ASCII using the [Punycode][] algorithm.
```js
const myURL = new URL('https://你好你好');
- // https://xn--6qqa088eba
+ // https://xn--6qqa088eba/
```
Additional [examples of parsed URLs][] may be found in the WHATWG URL Standard.
#### url.hash
+* {String}
+
Gets and sets the fragment portion of the URL.
```js
@@ -319,12 +403,14 @@ console.log(myURL.href);
```
Invalid URL characters included in the value assigned to the `hash` property
-are [percent-encoded](#whatwg-percent-encoding). Note that the selection of
-which characters to percent-encode may vary somewhat from what the
-[`url.parse()`][] and [`url.format()`][] methods would produce.
+are [percent-encoded][]. Note that the selection of which characters to
+percent-encode may vary somewhat from what the [`url.parse()`][] and
+[`url.format()`][] methods would produce.
#### url.host
+* {String}
+
Gets and sets the host portion of the URL.
```js
@@ -341,6 +427,8 @@ Invalid host values assigned to the `host` property are ignored.
#### url.hostname
+* {String}
+
Gets and sets the hostname portion of the URL. The key difference between
`url.host` and `url.hostname` is that `url.hostname` does *not* include the
port.
@@ -359,6 +447,8 @@ Invalid hostname values assigned to the `hostname` property are ignored.
#### url.href
+* {String}
+
Gets and sets the serialized URL.
```js
@@ -370,15 +460,20 @@ myURL.href = 'https://example.com/bar'
// Prints https://example.com/bar
```
-Setting the value of the `href` property to a new value is equivalent to
-creating a new `URL` object using `new URL(value)`. Each of the `URL` object's
-properties will be modified.
+Getting the value of the `href` property is equivalent to calling
+[`url.toString()`][].
+
+Setting the value of this property to a new value is equivalent to creating a
+new `URL` object using [`new URL(value)`][`new URL()`]. Each of the `URL`
+object's properties will be modified.
If the value assigned to the `href` property is not a valid URL, a `TypeError`
will be thrown.
#### url.origin
+* {String}
+
Gets the read-only serialization of the URL's origin. Unicode characters that
may be contained within the hostname will be encoded as-is without [Punycode][]
encoding.
@@ -400,6 +495,8 @@ console.log(idnURL.hostname);
#### url.password
+* {String}
+
Gets and sets the password portion of the URL.
```js
@@ -413,12 +510,14 @@ console.log(myURL.href);
```
Invalid URL characters included in the value assigned to the `password` property
-are [percent-encoded](#whatwg-percent-encoding). Note that the selection of
-which characters to percent-encode may vary somewhat from what the
-[`url.parse()`][] and [`url.format()`][] methods would produce.
+are [percent-encoded][]. Note that the selection of which characters to
+percent-encode may vary somewhat from what the [`url.parse()`][] and
+[`url.format()`][] methods would produce.
#### url.pathname
+* {String}
+
Gets and sets the path portion of the URL.
```js
@@ -432,23 +531,54 @@ console.log(myURL.href);
```
Invalid URL characters included in the value assigned to the `pathname`
-property are [percent-encoded](#whatwg-percent-encoding). Note that the
-selection of which characters to percent-encode may vary somewhat from what the
-[`url.parse()`][] and [`url.format()`][] methods would produce.
+property are [percent-encoded][]. Note that the selection of which characters
+to percent-encode may vary somewhat from what the [`url.parse()`][] and
+[`url.format()`][] methods would produce.
#### url.port
-Gets and sets the port portion of the URL. When getting the port, the value
-is returned as a String.
+* {String}
+
+Gets and sets the port portion of the URL.
```js
const myURL = new URL('https://example.org:8888');
console.log(myURL.port);
// Prints 8888
+// Default ports are automatically transformed to the empty string
+// (HTTPS protocol's default port is 443)
+myURL.port = '443';
+console.log(myURL.port);
+ // Prints the empty string
+console.log(myURL.href);
+ // Prints https://example.org/
+
myURL.port = 1234;
+console.log(myURL.port);
+ // Prints 1234
console.log(myURL.href);
- // Prints https://example.org:1234
+ // Prints https://example.org:1234/
+
+// Completely invalid port strings are ignored
+myURL.port = 'abcd';
+console.log(myURL.port);
+ // Prints 1234
+
+// Leading numbers are treated as a port number
+myURL.port = '5678abcd';
+console.log(myURL.port);
+ // Prints 5678
+
+// Non-integers are truncated
+myURL.port = 1234.5678;
+console.log(myURL.port);
+ // Prints 1234
+
+// Out-of-range numbers are ignored
+myURL.port = 1e10;
+console.log(myURL.port);
+ // Prints 1234
```
The port value may be set as either a number or as a String containing a number
@@ -456,10 +586,14 @@ in the range `0` to `65535` (inclusive). Setting the value to the default port
of the `URL` objects given `protocol` will result in the `port` value becoming
the empty string (`''`).
-Invalid URL port values assigned to the `port` property are ignored.
+If an invalid string is assigned to the `port` property, but it begins with a
+number, the leading number is assigned to `port`. Otherwise, or if the number
+lies outside the range denoted above, it is ignored.
#### url.protocol
+* {String}
+
Gets and sets the protocol portion of the URL.
```js
@@ -476,6 +610,8 @@ Invalid URL protocol values assigned to the `protocol` property are ignored.
#### url.search
+* {String}
+
Gets and sets the serialized query portion of the URL.
```js
@@ -489,17 +625,23 @@ console.log(myURL.href);
```
Any invalid URL characters appearing in the value assigned the `search`
-property will be [percent-encoded](#whatwg-percent-encoding). Note that the
-selection of which characters to percent-encode may vary somewhat from what the
-[`url.parse()`][] and [`url.format()`][] methods would produce.
+property will be [percent-encoded][]. Note that the selection of which
+characters to percent-encode may vary somewhat from what the [`url.parse()`][]
+and [`url.format()`][] methods would produce.
#### url.searchParams
-Gets a [`URLSearchParams`](#url_class_urlsearchparams) object representing the
-query parameters of the URL.
+* {URLSearchParams}
+
+Gets the [`URLSearchParams`][] object representing the query parameters of the
+URL. This property is read-only; to replace the entirety of query parameters of
+the URL, use the [`url.search`][] setter. See [`URLSearchParams`][]
+documentation for details.
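+
+For example (a minimal sketch):
+
+```js
+const URL = require('url').URL;
+const myURL = new URL('https://example.org/?abc=123');
+console.log(myURL.searchParams.get('abc'));
+  // Prints 123
+
+// `searchParams` itself cannot be replaced; assign to `search` instead.
+myURL.search = '?abc=456';
+console.log(myURL.searchParams.get('abc'));
+  // Prints 456
+```
+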
#### url.username
+* {String}
+
Gets and sets the username portion of the URL.
```js
@@ -513,20 +655,51 @@ console.log(myURL.href);
```
Any invalid URL characters appearing in the value assigned the `username`
-property will be [percent-encoded](#whatwg-percent-encoding). Note that the
-selection of which characters to percent-encode may vary somewhat from what the
-[`url.parse()`][] and [`url.format()`][] methods would produce.
+property will be [percent-encoded][]. Note that the selection of which
+characters to percent-encode may vary somewhat from what the [`url.parse()`][]
+and [`url.format()`][] methods would produce.
#### url.toString()
+* Returns: {String}
+
The `toString()` method on the `URL` object returns the serialized URL. The
-value returned is equivalent to that of `url.href`.
+value returned is equivalent to that of [`url.href`][] and [`url.toJSON()`][].
+
+Because of the need for standard compliance, this method does not allow users
+to customize the serialization process of the URL. For more flexibility, the
+[`require('url').format()`][] method might be of interest.
+
+#### url.toJSON()
+
+* Returns: {String}
+
+The `toJSON()` method on the `URL` object returns the serialized URL. The
+value returned is equivalent to that of [`url.href`][] and
+[`url.toString()`][].
+
+This method is automatically called when a `URL` object is serialized
+with [`JSON.stringify()`][].
+
+```js
+const myURLs = [
+ new URL('https://www.example.com'),
+ new URL('https://test.example.org')
+];
+console.log(JSON.stringify(myURLs));
+ // Prints ["https://www.example.com/","https://test.example.org/"]
+```
### Class: URLSearchParams
-The `URLSearchParams` object provides read and write access to the query of a
+The `URLSearchParams` API provides read and write access to the query of a
`URL`.
+The WHATWG `URLSearchParams` interface and the [`querystring`][] module have
+a similar purpose, but the [`querystring`][] module is more general, as it
+allows the customization of delimiter characters (`&` and `=`). On the other
+hand, this API is designed purely for URL query strings.
+
```js
const URL = require('url').URL;
const myURL = new URL('https://example.org/?abc=123');
@@ -568,36 +741,41 @@ Returns an ES6 Iterator over each of the name-value pairs in the query.
Each item of the iterator is a JavaScript Array. The first item of the Array
is the `name`, the second item of the Array is the `value`.
-Alias for `urlSearchParams\[\@\@iterator\]()`.
+Alias for [`urlSearchParams[@@iterator]()`][`urlSearchParams@@iterator()`].
-#### urlSearchParams.forEach(fn)
+#### urlSearchParams.forEach(fn[, thisArg])
* `fn` {Function} Function invoked for each name-value pair in the query.
+* `thisArg` {Object} Object to be used as `this` when `fn` is called
Iterates over each name-value pair in the query and invokes the given function.
```js
const URL = require('url').URL;
const myURL = new URL('https://example.org/?a=b&c=d');
-myURL.searchParams.forEach((value, name) => {
- console.log(name, value);
+myURL.searchParams.forEach((value, name, searchParams) => {
+ console.log(name, value, myURL.searchParams === searchParams);
});
+ // Prints:
+ // a b true
+ // c d true
```
#### urlSearchParams.get(name)
* `name` {String}
-* Returns: {String} or `null` if there is no name-value pair with the given
- `name`.
+* Returns: {String | Null}
-Returns the value of the first name-value pair whose name is `name`.
+Returns the value of the first name-value pair whose name is `name`. If there
+are no such pairs, `null` is returned.
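+
+For example (a short illustrative sketch):
+
+```js
+const { URLSearchParams } = require('url');
+const params = new URLSearchParams('foo=bar&foo=baz');
+console.log(params.get('foo'));
+  // Prints bar
+console.log(params.get('abc'));
+  // Prints null
+```
+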
#### urlSearchParams.getAll(name)
* `name` {String}
* Returns: {Array}
-Returns the values of all name-value pairs whose name is `name`.
+Returns the values of all name-value pairs whose name is `name`. If there are
+no such pairs, an empty array is returned.
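+
+For example (a short illustrative sketch):
+
+```js
+const { URLSearchParams } = require('url');
+const params = new URLSearchParams('foo=bar&foo=baz');
+console.log(params.getAll('foo'));
+  // Prints [ 'bar', 'baz' ]
+console.log(params.getAll('abc'));
+  // Prints []
+```
+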
#### urlSearchParams.has(name)
@@ -612,19 +790,64 @@ Returns `true` if there is at least one name-value pair whose name is `name`.
Returns an ES6 Iterator over the names of each name-value pair.
+```js
+const { URLSearchParams } = require('url');
+const params = new URLSearchParams('foo=bar&foo=baz');
+for (const name of params.keys()) {
+ console.log(name);
+}
+ // Prints:
+ // foo
+ // foo
+```
+
#### urlSearchParams.set(name, value)
* `name` {String}
* `value` {String}
-Remove any existing name-value pairs whose name is `name` and append a new
-name-value pair.
+Sets the value in the `URLSearchParams` object associated with `name` to
+`value`. If there are any pre-existing name-value pairs whose names are `name`,
+sets the first such pair's value to `value` and removes all others. If not,
+appends the name-value pair to the query string.
+
+```js
+const { URLSearchParams } = require('url');
+
+const params = new URLSearchParams();
+params.append('foo', 'bar');
+params.append('foo', 'baz');
+params.append('abc', 'def');
+console.log(params.toString());
+ // Prints foo=bar&foo=baz&abc=def
+
+params.set('foo', 'def');
+params.set('xyz', 'opq');
+console.log(params.toString());
+ // Prints foo=def&abc=def&xyz=opq
+```
+
+#### urlSearchParams.sort()
+
+Sorts all existing name-value pairs in-place by their names. Sorting is done
+with a [stable sorting algorithm][], so relative order between name-value pairs
+with the same name is preserved.
+
+This method can be used, in particular, to increase cache hits.
+
+```js
+const params = new URLSearchParams('query[]=abc&type=search&query[]=123');
+params.sort();
+console.log(params.toString());
+ // Prints query%5B%5D=abc&query%5B%5D=123&type=search
+```
#### urlSearchParams.toString()
* Returns: {String}
-Returns the search parameters serialized as a URL-encoded string.
+Returns the search parameters serialized as a string, with characters
+percent-encoded where necessary.
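+
+A short sketch, mirroring the `sort()` example above:
+
+```js
+const { URLSearchParams } = require('url');
+const params = new URLSearchParams('key[]=value');
+console.log(params.toString());
+  // Prints key%5B%5D=value
+```
+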
#### urlSearchParams.values()
@@ -632,7 +855,7 @@ Returns the search parameters serialized as a URL-encoded string.
Returns an ES6 Iterator over the values of each name-value pair.
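+
+For example (following the pattern of the `keys()` example above):
+
+```js
+const { URLSearchParams } = require('url');
+const params = new URLSearchParams('foo=bar&foo=baz');
+for (const value of params.values()) {
+  console.log(value);
+}
+  // Prints:
+  //   bar
+  //   baz
+```
+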
-#### urlSearchParams\[\@\@iterator\]()
+#### urlSearchParams\[@@iterator\]()
* Returns: {Iterator}
@@ -640,7 +863,18 @@ Returns an ES6 Iterator over each of the name-value pairs in the query string.
Each item of the iterator is a JavaScript Array. The first item of the Array
is the `name`, the second item of the Array is the `value`.
-Alias for `urlSearchParams.entries()`.
+Alias for [`urlSearchParams.entries()`][].
+
+```js
+const { URLSearchParams } = require('url');
+const params = new URLSearchParams('foo=bar&xyz=baz');
+for (const [name, value] of params) {
+ console.log(name, value);
+}
+ // Prints:
+ // foo bar
+ // xyz baz
+```
### require('url').domainToAscii(domain)
@@ -711,4 +945,17 @@ console.log(myURL.origin);
[examples of parsed URLs]: https://url.spec.whatwg.org/#example-url-parsing
[`url.parse()`]: #url_url_parse_urlstring_parsequerystring_slashesdenotehost
[`url.format()`]: #url_url_format_urlobject
+[`require('url').format()`]: #url_url_format_url_options
+[`url.toString()`]: #url_url_tostring
[Punycode]: https://tools.ietf.org/html/rfc5891#section-4.4
+[WHATWG URL]: #url_the_whatwg_url_api
+[`new URL()`]: #url_constructor_new_url_input_base
+[`url.href`]: #url_url_href
+[`url.search`]: #url_url_search
+[percent-encoded]: #whatwg-percent-encoding
+[`URLSearchParams`]: #url_class_urlsearchparams
+[`urlSearchParams.entries()`]: #url_urlsearchparams_entries
+[`urlSearchParams@@iterator()`]: #url_urlsearchparams_iterator
+[stable sorting algorithm]: https://en.wikipedia.org/wiki/Sorting_algorithm#Stability
+[`JSON.stringify()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify
+[`url.toJSON()`]: #url_url_tojson
diff --git a/doc/api/util.md b/doc/api/util.md
index 3843f80ce75366..726691b8d6dabd 100644
--- a/doc/api/util.md
+++ b/doc/api/util.md
@@ -136,6 +136,10 @@ util.format(1, 2, 3); // '1 2 3'
## util.inherits(constructor, superConstructor)
_Note: usage of `util.inherits()` is discouraged. Please use the ES6 `class` and
@@ -203,6 +207,20 @@ stream.write('With ES6');
## util.inspect(object[, options])
* `object` {any} Any JavaScript primitive or Object.
@@ -343,6 +361,14 @@ util.inspect(obj);
// Returns: "{ bar: 'baz' }"
```
+### util.inspect.custom
+
+
+A Symbol that can be used to declare custom inspect functions, see
+[Custom inspection functions on Objects][].
+
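+A brief sketch of declaring a custom inspect function with this Symbol (the
+`Box` class here is purely illustrative):
+
+```js
+const util = require('util');
+
+class Box {
+  constructor(value) {
+    this.value = value;
+  }
+  [util.inspect.custom](depth, options) {
+    // The returned string is used verbatim by util.inspect().
+    return `Box { ${this.value} }`;
+  }
+}
+
+console.log(util.inspect(new Box(42)));
+  // Prints Box { 42 }
+```
+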
### util.inspect.defaultOptions
-A Symbol that can be used to declare custom inspect functions, see
-[Custom inspection functions on Objects][].
+> Stability: 0 - Deprecated: Use [`Object.assign()`] instead.
-## Deprecated APIs
+The `util._extend()` method was never intended to be used outside of internal
+Node.js modules. The community found and used it anyway.
-The following APIs have been deprecated and should no longer be used. Existing
-applications and modules should be updated to find alternative approaches.
+It is deprecated and should not be used in new code. JavaScript comes with very
+similar built-in functionality through [`Object.assign()`].
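+
+For example (a minimal sketch of the replacement):
+
+```js
+// Instead of the deprecated util._extend(target, source):
+const target = { a: 1 };
+Object.assign(target, { b: 2 });
+console.log(target);
+  // Prints { a: 1, b: 2 }
+```
+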
### util.debug(string)
-
-> Stability: 0 - Deprecated: Use [`Object.assign()`] instead.
-
-The `util._extend()` method was never intended to be used outside of internal
-Node.js modules. The community found and used it anyway.
-
-It is deprecated and should not be used in new code. JavaScript comes with very
-similar built-in functionality through [`Object.assign()`].
-
[`Array.isArray`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/isArray
[constructor]: https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Object/constructor
[semantically incompatible]: https://github.com/nodejs/node/issues/4179
diff --git a/doc/api/v8.md b/doc/api/v8.md
index 8cec6bdfdf8007..be222bbabf63ed 100644
--- a/doc/api/v8.md
+++ b/doc/api/v8.md
@@ -9,48 +9,13 @@ const v8 = require('v8');
*Note*: The APIs and implementation are subject to change at any time.
-## v8.getHeapStatistics()
-
-
-Returns an object with the following properties:
-
-* `total_heap_size` {number}
-* `total_heap_size_executable` {number}
-* `total_physical_size` {number}
-* `total_available_size` {number}
-* `used_heap_size` {number}
-* `heap_size_limit` {number}
-* `malloced_memory` {number}
-* `peak_malloced_memory` {number}
-* `does_zap_garbage` {number}
-
-`does_zap_garbage` is a 0/1 boolean, which signifies whether the `--zap_code_space`
-option is enabled or not. This makes V8 overwrite heap garbage with a bit
-pattern. The RSS footprint (resident memory set) gets bigger because it
-continuously touches all heap pages and that makes them less likely to get
-swapped out by the operating system.
-
-For example:
-
-```js
-{
- total_heap_size: 7326976,
- total_heap_size_executable: 4194304,
- total_physical_size: 7326976,
- total_available_size: 1152656,
- used_heap_size: 3476208,
- heap_size_limit: 1535115264,
- malloced_memory: 16384,
- peak_malloced_memory: 1127496,
- does_zap_garbage: 0
-}
-```
-
## v8.getHeapSpaceStatistics()
Returns statistics about the V8 heap spaces, i.e. the segments which make up
@@ -108,6 +73,53 @@ For example:
]
```
+## v8.getHeapStatistics()
+
+
+Returns an object with the following properties:
+
+* `total_heap_size` {number}
+* `total_heap_size_executable` {number}
+* `total_physical_size` {number}
+* `total_available_size` {number}
+* `used_heap_size` {number}
+* `heap_size_limit` {number}
+* `malloced_memory` {number}
+* `peak_malloced_memory` {number}
+* `does_zap_garbage` {number}
+
+`does_zap_garbage` is a 0/1 boolean, which signifies whether the `--zap_code_space`
+option is enabled or not. This makes V8 overwrite heap garbage with a bit
+pattern. The RSS footprint (resident set size) gets bigger because it
+continuously touches all heap pages and that makes them less likely to get
+swapped out by the operating system.
+
+For example:
+
+```js
+{
+ total_heap_size: 7326976,
+ total_heap_size_executable: 4194304,
+ total_physical_size: 7326976,
+ total_available_size: 1152656,
+ used_heap_size: 3476208,
+ heap_size_limit: 1535115264,
+ malloced_memory: 16384,
+ peak_malloced_memory: 1127496,
+ does_zap_garbage: 0
+}
+```
+
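+A quick usage sketch (the printed numbers will vary per process):
+
+```js
+const v8 = require('v8');
+console.log(v8.getHeapStatistics().used_heap_size);
+  // Prints the current heap usage in bytes, e.g. 3476208
+```
+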
## v8.setFlagsFromString(string)
* `code` {string} The JavaScript code to compile.
@@ -60,6 +65,10 @@ each run, just for that run.
### script.runInContext(contextifiedSandbox[, options])
* `contextifiedSandbox` {Object} A [contextified][] object as returned by the
diff --git a/doc/api/zlib.md b/doc/api/zlib.md
index 0d79c084acd565..5b02a8ed37d278 100644
--- a/doc/api/zlib.md
+++ b/doc/api/zlib.md
@@ -277,6 +277,10 @@ Compression strategy.
## Class Options
@@ -315,6 +319,17 @@ Compress data using deflate, and do not append a `zlib` header.
## Class: zlib.Gunzip
Decompress a gzip stream.
@@ -329,6 +344,10 @@ Compress data using gzip.
## Class: zlib.Inflate
Decompress a deflate stream.
@@ -336,6 +355,13 @@ Decompress a deflate stream.
## Class: zlib.InflateRaw
Decompress a raw deflate stream.
diff --git a/doc/api_assets/style.css b/doc/api_assets/style.css
index f45c4672af88f8..7889389f59b251 100644
--- a/doc/api_assets/style.css
+++ b/doc/api_assets/style.css
@@ -470,6 +470,12 @@ th > *:last-child, td > *:last-child {
margin-bottom: 0;
}
+.changelog > summary {
+ margin: .5rem 0;
+ padding: .5rem 0;
+ cursor: pointer;
+}
+
/* simpler clearfix */
.clearfix:after {
content: ".";
diff --git a/doc/changelogs/CHANGELOG_V7.md b/doc/changelogs/CHANGELOG_V7.md
index 3355de15e35795..b1d3310a0668e9 100644
--- a/doc/changelogs/CHANGELOG_V7.md
+++ b/doc/changelogs/CHANGELOG_V7.md
@@ -6,6 +6,7 @@
+7.7.0
7.6.0
7.5.0
7.4.0
@@ -27,6 +28,184 @@
* [io.js](CHANGELOG_IOJS.md)
* [Archive](CHANGELOG_ARCHIVE.md)
+
+## 2017-02-28, Version 7.7.0 (Current), @italoacasas
+
+This release contains a deprecation warning for `node --debug`. You can find more information in the
+[Diagnostics Working Group Update](https://nodejs.org/en/blog/wg/diag-wg-update-2017-02/).
+
+### Notable changes
+
+* **child_process**: spawnSync() exit code is now null when the child is killed via signal (cjihrig) [#11288](https://github.com/nodejs/node/pull/11288)
+* **http**: new functions to access the headers for an outgoing HTTP message (Brian White) [#11562](https://github.com/nodejs/node/pull/11562)
+* **lib**: deprecate node --debug at runtime (Josh Gavant) [#11275](https://github.com/nodejs/node/pull/11275)
+* **tls**: new tls.TLSSocket() supports sec ctx options (Sam Roberts) [#11005](https://github.com/nodejs/node/pull/11005)
+* **url**: adding URL.prototype.toJSON support (Michaël Zasso) [#11236](https://github.com/nodejs/node/pull/11236)
+* **doc**: items in the API documentation may now have changelogs (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* **crypto**: adding support for OPENSSL_CONF again (Sam Roberts) [#11006](https://github.com/nodejs/node/pull/11006)
+* **src**: adding support for trace-event tracing (misterpoe) [#11106](https://github.com/nodejs/node/pull/11106)
+
+### Commits
+
+* [[`18599fc3d7`](https://github.com/nodejs/node/commit/18599fc3d7)] - doc/url: various improvements to WHATWG API (Timothy Gu)
+* [[`e7d37a3f09`](https://github.com/nodejs/node/commit/e7d37a3f09)] - tools/doc: add more intrinsic and custom types (Timothy Gu)
+* [[`6bcc841786`](https://github.com/nodejs/node/commit/6bcc841786)] - **assert**: apply minor refactoring (Rich Trott) [#11511](https://github.com/nodejs/node/pull/11511)
+* [[`6a2f330dbd`](https://github.com/nodejs/node/commit/6a2f330dbd)] - **assert**: remove unneeded condition (Rich Trott) [#11314](https://github.com/nodejs/node/pull/11314)
+* [[`0762482339`](https://github.com/nodejs/node/commit/0762482339)] - **assert**: unlock the assert API (Rich Trott) [#11304](https://github.com/nodejs/node/pull/11304)
+* [[`842ac583f6`](https://github.com/nodejs/node/commit/842ac583f6)] - **benchmark**: add url.domainTo*() (Timothy Gu) [#11464](https://github.com/nodejs/node/pull/11464)
+* [[`3951bd9ac1`](https://github.com/nodejs/node/commit/3951bd9ac1)] - **benchmark**: strip BOM in dgram/bind-params (Anna Henningsen) [#11479](https://github.com/nodejs/node/pull/11479)
+* [[`e1573b9fb7`](https://github.com/nodejs/node/commit/e1573b9fb7)] - **benchmark**: add dgram bind(+/- params) benchmark (Vse Mozhet Byt) [#11313](https://github.com/nodejs/node/pull/11313)
+* [[`48f6660d78`](https://github.com/nodejs/node/commit/48f6660d78)] - **benchmark**: fix timer display in progress output (Brian White) [#11235](https://github.com/nodejs/node/pull/11235)
+* [[`5a81031fd8`](https://github.com/nodejs/node/commit/5a81031fd8)] - **benchmark**: clean up legacy url benchmarks (Joyee Cheung)
+* [[`7e37628c51`](https://github.com/nodejs/node/commit/7e37628c51)] - **benchmark**: add url/url-searchparams-sort.js (Timothy Gu)
+* [[`4ffad094ba`](https://github.com/nodejs/node/commit/4ffad094ba)] - **buffer**: refactor slowToString (James M Snell) [#11358](https://github.com/nodejs/node/pull/11358)
+* [[`d08a8e68e8`](https://github.com/nodejs/node/commit/d08a8e68e8)] - **buffer**: avoid use of arguments (James M Snell) [#11358](https://github.com/nodejs/node/pull/11358)
+* [[`4408437796`](https://github.com/nodejs/node/commit/4408437796)] - **build**: add rule to clean addon tests build (Joyee Cheung) [#11519](https://github.com/nodejs/node/pull/11519)
+* [[`8d323bb91a`](https://github.com/nodejs/node/commit/8d323bb91a)] - **build**: fail on CI if leftover processes (Rich Trott) [#11269](https://github.com/nodejs/node/pull/11269)
+* [[`d4a8631bd1`](https://github.com/nodejs/node/commit/d4a8631bd1)] - **build**: fix newlines in addon build output (Brian White) [#11466](https://github.com/nodejs/node/pull/11466)
+* [[`bc9c381027`](https://github.com/nodejs/node/commit/bc9c381027)] - **build**: add code coverage to make (Wayne Andrews) [#10856](https://github.com/nodejs/node/pull/10856)
+* [[`9c45758cdf`](https://github.com/nodejs/node/commit/9c45758cdf)] - **build**: fix building with ninja on linux (Kenan Yildirim) [#11348](https://github.com/nodejs/node/pull/11348)
+* [[`86a647899f`](https://github.com/nodejs/node/commit/86a647899f)] - **build**: don't rebuild test/gc add-on unnecessarily (Ben Noordhuis) [#11311](https://github.com/nodejs/node/pull/11311)
+* [[`c942e2037c`](https://github.com/nodejs/node/commit/c942e2037c)] - **child_process**: refactor internal/child_process.js (Arseniy Maximov) [#11366](https://github.com/nodejs/node/pull/11366)
+* [[`0240eb99a2`](https://github.com/nodejs/node/commit/0240eb99a2)] - **child_process**: remove empty if condition (cjihrig) [#11427](https://github.com/nodejs/node/pull/11427)
+* [[`60fc567952`](https://github.com/nodejs/node/commit/60fc567952)] - **child_process**: move anonymous class to top level (Jackson Tian) [#11147](https://github.com/nodejs/node/pull/11147)
+* [[`58e2517fc0`](https://github.com/nodejs/node/commit/58e2517fc0)] - **child_process**: exit spawnSync with null on signal (cjihrig) [#11288](https://github.com/nodejs/node/pull/11288)
+* [[`4b4bc13758`](https://github.com/nodejs/node/commit/4b4bc13758)] - **cluster**: properly handle --inspect-{brk,port} (Ali Ijaz Sheikh) [#11386](https://github.com/nodejs/node/pull/11386)
+* [[`570c5e1da8`](https://github.com/nodejs/node/commit/570c5e1da8)] - **(SEMVER-MINOR)** **crypto**: support OPENSSL_CONF again (Sam Roberts) [#11006](https://github.com/nodejs/node/pull/11006)
+* [[`d4000e73ed`](https://github.com/nodejs/node/commit/d4000e73ed)] - **deps**: cherry-pick 7c982e7 from V8 upstream (Jaideep Bajwa) [#11263](https://github.com/nodejs/node/pull/11263)
+* [[`bd4ccc892c`](https://github.com/nodejs/node/commit/bd4ccc892c)] - **src**: add tracing controller (misterpoe) [#11106](https://github.com/nodejs/node/pull/11106)
+* [[`aef67cfe39`](https://github.com/nodejs/node/commit/aef67cfe39)] - **dgram**: fix possibly deoptimizing use of arguments (Vse Mozhet Byt) [#11242](https://github.com/nodejs/node/pull/11242)
+* [[`662b0c31ce`](https://github.com/nodejs/node/commit/662b0c31ce)] - **dns**: avoid use of arguments (James M Snell) [#11359](https://github.com/nodejs/node/pull/11359)
+* [[`fedf26b235`](https://github.com/nodejs/node/commit/fedf26b235)] - **doc**: update V8 debugger doc to mention --inspect-brk (James Ide) [#11495](https://github.com/nodejs/node/pull/11495)
+* [[`1c7f221ef5`](https://github.com/nodejs/node/commit/1c7f221ef5)] - **doc**: adding deprecations.md (Italo A. Casas) [#11621](https://github.com/nodejs/node/pull/11621)
+* [[`90bdf16507`](https://github.com/nodejs/node/commit/90bdf16507)] - **doc**: link to readable and writeable stream section (Sebastian Van Sande) [#11517](https://github.com/nodejs/node/pull/11517)
+* [[`3b66ccf0ff`](https://github.com/nodejs/node/commit/3b66ccf0ff)] - **doc**: document clientRequest.aborted (Zach Bjornson) [#11544](https://github.com/nodejs/node/pull/11544)
+* [[`128f812157`](https://github.com/nodejs/node/commit/128f812157)] - **doc**: argument types for assert methods (Amelia Clarke) [#11548](https://github.com/nodejs/node/pull/11548)
+* [[`b1b6b8b730`](https://github.com/nodejs/node/commit/b1b6b8b730)] - **doc**: add changelogs for buffer (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`597945136e`](https://github.com/nodejs/node/commit/597945136e)] - **doc**: add changelogs for v8 (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`b01fd8ce3a`](https://github.com/nodejs/node/commit/b01fd8ce3a)] - **doc**: fix sorting in API references (Vse Mozhet Byt) [#11529](https://github.com/nodejs/node/pull/11529)
+* [[`56cd1932c1`](https://github.com/nodejs/node/commit/56cd1932c1)] - **doc**: note message event listeners ref IPC channels (Diego Rodríguez Baquero) [#11494](https://github.com/nodejs/node/pull/11494)
+* [[`47034e12ad`](https://github.com/nodejs/node/commit/47034e12ad)] - **doc**: change broken fg(1) links to fg(1p) (Karan Thakkar) [#11504](https://github.com/nodejs/node/pull/11504)
+* [[`47dc5662f3`](https://github.com/nodejs/node/commit/47dc5662f3)] - **doc**: add changelogs for zlib (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`4d122700ab`](https://github.com/nodejs/node/commit/4d122700ab)] - **doc**: add changelogs for vm (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`b868468942`](https://github.com/nodejs/node/commit/b868468942)] - **doc**: add changelogs for util (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`93e7639c12`](https://github.com/nodejs/node/commit/93e7639c12)] - **doc**: add changelogs for url (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`f7d59e5568`](https://github.com/nodejs/node/commit/f7d59e5568)] - **doc**: add changelogs for tls (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`fc53547ed4`](https://github.com/nodejs/node/commit/fc53547ed4)] - **doc**: add changelogs for stream (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`c373e07a09`](https://github.com/nodejs/node/commit/c373e07a09)] - **doc**: add changelogs for repl (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`962d27dbde`](https://github.com/nodejs/node/commit/962d27dbde)] - **doc**: add changelogs for readline (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`7c609dc30a`](https://github.com/nodejs/node/commit/7c609dc30a)] - **doc**: add changelogs for querystring (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`6285ff2275`](https://github.com/nodejs/node/commit/6285ff2275)] - **doc**: add changelogs for punycode (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`df30bc869a`](https://github.com/nodejs/node/commit/df30bc869a)] - **doc**: add changelogs for process (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`c1477b9bd3`](https://github.com/nodejs/node/commit/c1477b9bd3)] - **doc**: add changelogs for path (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`ac10a3b306`](https://github.com/nodejs/node/commit/ac10a3b306)] - **doc**: add changelogs for os (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`3183397c8a`](https://github.com/nodejs/node/commit/3183397c8a)] - **doc**: add changelogs for net (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`6cc8f19e99`](https://github.com/nodejs/node/commit/6cc8f19e99)] - **doc**: add changelogs for http (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`f0cee80de7`](https://github.com/nodejs/node/commit/f0cee80de7)] - **doc**: add changelogs for fs (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`354161d804`](https://github.com/nodejs/node/commit/354161d804)] - **doc**: add changelogs for events (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`4f936014ff`](https://github.com/nodejs/node/commit/4f936014ff)] - **doc**: add changelogs for dns (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`5bc9349d40`](https://github.com/nodejs/node/commit/5bc9349d40)] - **doc**: add changelogs for dgram (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`e23598d09f`](https://github.com/nodejs/node/commit/e23598d09f)] - **doc**: add changelogs for crypto (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`296e22adce`](https://github.com/nodejs/node/commit/296e22adce)] - **doc**: add changelogs for console (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`de83e215cb`](https://github.com/nodejs/node/commit/de83e215cb)] - **doc**: add changelogs for cluster (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`5d4e638e34`](https://github.com/nodejs/node/commit/5d4e638e34)] - **doc**: add changelogs for cli (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`ad1ad4d06d`](https://github.com/nodejs/node/commit/ad1ad4d06d)] - **doc**: add changelogs for child_process (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`42413b611b`](https://github.com/nodejs/node/commit/42413b611b)] - **doc**: add changelogs for assert (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`d3013678fb`](https://github.com/nodejs/node/commit/d3013678fb)] - **doc**: change STYLE-GUIDE to STYLE_GUIDE (Dean Coakley) [#11460](https://github.com/nodejs/node/pull/11460)
+* [[`c5ff76dadf`](https://github.com/nodejs/node/commit/c5ff76dadf)] - **doc**: restrict the ES.Next features usage in tests (DavidCai) [#11452](https://github.com/nodejs/node/pull/11452)
+* [[`98eb18ba3f`](https://github.com/nodejs/node/commit/98eb18ba3f)] - **doc**: add comment for net.Server's error event (QianJin2013) [#11136](https://github.com/nodejs/node/pull/11136)
+* [[`20d86db9bb`](https://github.com/nodejs/node/commit/20d86db9bb)] - **doc**: add version meta for SSL_CERT_DIR/FILE (Sam Roberts) [#11007](https://github.com/nodejs/node/pull/11007)
+* [[`66f9506c63`](https://github.com/nodejs/node/commit/66f9506c63)] - **doc**: improve test/README.md (Joyee Cheung) [#11237](https://github.com/nodejs/node/pull/11237)
+* [[`5d12fd9a4b`](https://github.com/nodejs/node/commit/5d12fd9a4b)] - **doc**: add benchmark/README.md and fix guide (Joyee Cheung) [#11237](https://github.com/nodejs/node/pull/11237)
+* [[`22a6eddc5c`](https://github.com/nodejs/node/commit/22a6eddc5c)] - **doc**: move benchmark/README.md to doc/guides (Joyee Cheung) [#11237](https://github.com/nodejs/node/pull/11237)
+* [[`12cf359423`](https://github.com/nodejs/node/commit/12cf359423)] - **doc**: add comment for net.Server.listen IPv6 '::' (QianJin2013) [#11134](https://github.com/nodejs/node/pull/11134)
+* [[`83fe819131`](https://github.com/nodejs/node/commit/83fe819131)] - **doc**: add STYLE_GUIDE (moved from nodejs/docs) (Gibson Fahnestock) [#11321](https://github.com/nodejs/node/pull/11321)
+* [[`ef1731d972`](https://github.com/nodejs/node/commit/ef1731d972)] - **doc**: add missing function to test common doc (Rich Trott) [#11382](https://github.com/nodejs/node/pull/11382)
+* [[`c3c874f514`](https://github.com/nodejs/node/commit/c3c874f514)] - **doc**: dns examples implied string args were arrays (Sam Roberts) [#11350](https://github.com/nodejs/node/pull/11350)
+* [[`5f1a568ccc`](https://github.com/nodejs/node/commit/5f1a568ccc)] - **doc**: describe when stdout/err is sync (Sam Roberts) [#10884](https://github.com/nodejs/node/pull/10884)
+* [[`5a2db15736`](https://github.com/nodejs/node/commit/5a2db15736)] - **doc**: add documentation for url.format(URL\[, options\]); (James M Snell)
+* [[`4d7c9427c1`](https://github.com/nodejs/node/commit/4d7c9427c1)] - **doc**: synchronize + update _toc.md and all.md (Vse Mozhet Byt) [#11206](https://github.com/nodejs/node/pull/11206)
+* [[`6a45265e81`](https://github.com/nodejs/node/commit/6a45265e81)] - **doc**: update code examples in domain.md (Vse Mozhet Byt) [#11110](https://github.com/nodejs/node/pull/11110)
+* [[`89b66dc636`](https://github.com/nodejs/node/commit/89b66dc636)] - **doc,test**: args to `buffer.copy` can be Uint8Arrays (Anna Henningsen) [#11486](https://github.com/nodejs/node/pull/11486)
+* [[`4f6a3d38c3`](https://github.com/nodejs/node/commit/4f6a3d38c3)] - **domain,events**: support non-object 'error' argument (Ben Noordhuis) [#11438](https://github.com/nodejs/node/pull/11438)
+* [[`214a39294a`](https://github.com/nodejs/node/commit/214a39294a)] - **(SEMVER-MINOR)** **errors**: add internal/errors.js (James M Snell) [#11220](https://github.com/nodejs/node/pull/11220)
+* [[`758126301e`](https://github.com/nodejs/node/commit/758126301e)] - **fs**: improve performance for sync stat() functions (Brian White) [#11522](https://github.com/nodejs/node/pull/11522)
+* [[`3e8d43d165`](https://github.com/nodejs/node/commit/3e8d43d165)] - **http**: add new functions to OutgoingMessage (Brian White) [#11562](https://github.com/nodejs/node/pull/11562)
+* [[`614742b67f`](https://github.com/nodejs/node/commit/614742b67f)] - **(SEMVER-MINOR)** **lib**: deprecate node --debug at runtime (Josh Gavant) [#11275](https://github.com/nodejs/node/pull/11275)
+* [[`a710167c79`](https://github.com/nodejs/node/commit/a710167c79)] - **lib**: rename kMaxCallbacksUntilQueueIsShortened (JungMinu) [#11473](https://github.com/nodejs/node/pull/11473)
+* [[`61e1af2155`](https://github.com/nodejs/node/commit/61e1af2155)] - **lib**: remove unnecessary assignments with _extend (Sakthipriyan Vairamani (thefourtheye)) [#11364](https://github.com/nodejs/node/pull/11364)
+* [[`d1549bf8d9`](https://github.com/nodejs/node/commit/d1549bf8d9)] - **lib**: add constant kMaxCallbacksUntilQueueIsShortened (Daniel Bevenius) [#11199](https://github.com/nodejs/node/pull/11199)
+* [[`3afe90dc9b`](https://github.com/nodejs/node/commit/3afe90dc9b)] - **net**: prefer === to == (Arseniy Maximov) [#11513](https://github.com/nodejs/node/pull/11513)
+* [[`db06c7311b`](https://github.com/nodejs/node/commit/db06c7311b)] - **os**: improve loadavg() performance (Brian White) [#11516](https://github.com/nodejs/node/pull/11516)
+* [[`fe7a722468`](https://github.com/nodejs/node/commit/fe7a722468)] - **process**: fix typo in comments (levsthings) [#11503](https://github.com/nodejs/node/pull/11503)
+* [[`54e1f0c219`](https://github.com/nodejs/node/commit/54e1f0c219)] - **process**: improve memoryUsage() performance (Brian White) [#11497](https://github.com/nodejs/node/pull/11497)
+* [[`fb85f5049e`](https://github.com/nodejs/node/commit/fb85f5049e)] - **src**: clean up MaybeStackBuffer (Timothy Gu) [#11464](https://github.com/nodejs/node/pull/11464)
+* [[`beda32675f`](https://github.com/nodejs/node/commit/beda32675f)] - **src**: don't assume v8::Local is using-declared (Timothy Gu) [#11464](https://github.com/nodejs/node/pull/11464)
+* [[`64a92565e0`](https://github.com/nodejs/node/commit/64a92565e0)] - **src**: update http-parser link (Daniel Bevenius) [#11477](https://github.com/nodejs/node/pull/11477)
+* [[`539e83a820`](https://github.com/nodejs/node/commit/539e83a820)] - **src**: remove usage of deprecated debug API (Yang Guo) [#11437](https://github.com/nodejs/node/pull/11437)
+* [[`8be6702539`](https://github.com/nodejs/node/commit/8be6702539)] - **(SEMVER-MINOR)** **src**: add SafeGetenv() to internal API (Sam Roberts) [#11006](https://github.com/nodejs/node/pull/11006)
+* [[`7d47f27049`](https://github.com/nodejs/node/commit/7d47f27049)] - **src**: remove unused variable in node_crypto (cjihrig) [#11361](https://github.com/nodejs/node/pull/11361)
+* [[`8a5c0fb0ff`](https://github.com/nodejs/node/commit/8a5c0fb0ff)] - **src**: remove unused typedef (Ben Noordhuis) [#11322](https://github.com/nodejs/node/pull/11322)
+* [[`39b00349b8`](https://github.com/nodejs/node/commit/39b00349b8)] - **src, i18n**: cleanup usage of MaybeStackBuffer (Timothy Gu) [#11464](https://github.com/nodejs/node/pull/11464)
+* [[`d0483ee47b`](https://github.com/nodejs/node/commit/d0483ee47b)] - **test**: change common.expectsError() signature (Rich Trott) [#11512](https://github.com/nodejs/node/pull/11512)
+* [[`f193c6f996`](https://github.com/nodejs/node/commit/f193c6f996)] - **test**: favor assertions over console logging (Rich Trott) [#11547](https://github.com/nodejs/node/pull/11547)
+* [[`4b05ec3b95`](https://github.com/nodejs/node/commit/4b05ec3b95)] - **test**: run test-setproctitle where supported (Howard Hellyer) [#11416](https://github.com/nodejs/node/pull/11416)
+* [[`ff854834b6`](https://github.com/nodejs/node/commit/ff854834b6)] - **test**: fix flaky test-vm-timeout-rethrow (Kunal Pathak) [#11530](https://github.com/nodejs/node/pull/11530)
+* [[`d7fd694cee`](https://github.com/nodejs/node/commit/d7fd694cee)] - **test**: remove redundant additional url tests (Joyee Cheung) [#11439](https://github.com/nodejs/node/pull/11439)
+* [[`e92ddd46bb`](https://github.com/nodejs/node/commit/e92ddd46bb)] - **test**: synchronize WPT url test data (Joyee Cheung) [#11439](https://github.com/nodejs/node/pull/11439)
+* [[`4109e0edc4`](https://github.com/nodejs/node/commit/4109e0edc4)] - **test**: remove WHATWG URL test data file extension (Joyee Cheung) [#11439](https://github.com/nodejs/node/pull/11439)
+* [[`ecb3a7e933`](https://github.com/nodejs/node/commit/ecb3a7e933)] - **(SEMVER-MINOR)** **test**: make tls-socket-default-options tests run (Sam Roberts) [#11005](https://github.com/nodejs/node/pull/11005)
+* [[`f5b4849208`](https://github.com/nodejs/node/commit/f5b4849208)] - **test**: test bottom-up merge sort in URLSearchParams (Daijiro Wachi) [#11399](https://github.com/nodejs/node/pull/11399)
+* [[`ff927b2cf8`](https://github.com/nodejs/node/commit/ff927b2cf8)] - **test**: add cases for unescape & unescapeBuffer (Daijiro Wachi) [#11326](https://github.com/nodejs/node/pull/11326)
+* [[`ea29d4852a`](https://github.com/nodejs/node/commit/ea29d4852a)] - **test**: use expectsError in test-debug-agent.js (Arseniy Maximov) [#11410](https://github.com/nodejs/node/pull/11410)
+* [[`8e455a9093`](https://github.com/nodejs/node/commit/8e455a9093)] - **test**: add test for URLSearchParams inspection (Daijiro Wachi) [#11428](https://github.com/nodejs/node/pull/11428)
+* [[`ae9b891a39`](https://github.com/nodejs/node/commit/ae9b891a39)] - **test**: use expectsError in require-invalid-package (Rich Trott) [#11409](https://github.com/nodejs/node/pull/11409)
+* [[`91fac08c3b`](https://github.com/nodejs/node/commit/91fac08c3b)] - **test**: use common.expectsError() (Rich Trott) [#11408](https://github.com/nodejs/node/pull/11408)
+* [[`46084e3270`](https://github.com/nodejs/node/commit/46084e3270)] - **test**: refactor common.expectsError() (Rich Trott) [#11381](https://github.com/nodejs/node/pull/11381)
+* [[`8fdb6c24f9`](https://github.com/nodejs/node/commit/8fdb6c24f9)] - **test**: throw check in test-zlib-write-after-close (Jason Wilson) [#11482](https://github.com/nodejs/node/pull/11482)
+* [[`b395ed9407`](https://github.com/nodejs/node/commit/b395ed9407)] - **test**: increase coverage of vm (DavidCai) [#11377](https://github.com/nodejs/node/pull/11377)
+* [[`000b2a14c1`](https://github.com/nodejs/node/commit/000b2a14c1)] - **test**: add support for --gtest_filter (Daniel Bevenius) [#11474](https://github.com/nodejs/node/pull/11474)
+* [[`34220b75e2`](https://github.com/nodejs/node/commit/34220b75e2)] - **test**: add regex check to test-module-loading (Tarang Hirani) [#11413](https://github.com/nodejs/node/pull/11413)
+* [[`4509d84095`](https://github.com/nodejs/node/commit/4509d84095)] - **test**: improve coverage in test-crypto.dh (Eric Christie) [#11253](https://github.com/nodejs/node/pull/11253)
+* [[`da10e2649d`](https://github.com/nodejs/node/commit/da10e2649d)] - **test**: add error checking in callback (Rich Trott) [#11446](https://github.com/nodejs/node/pull/11446)
+* [[`7b8087630f`](https://github.com/nodejs/node/commit/7b8087630f)] - **test**: refactor test-http-response-splitting (Arseniy Maximov) [#11429](https://github.com/nodejs/node/pull/11429)
+* [[`c37e2b7690`](https://github.com/nodejs/node/commit/c37e2b7690)] - **test**: add test cases for path (Yuta Hiroto) [#11453](https://github.com/nodejs/node/pull/11453)
+* [[`a523482cca`](https://github.com/nodejs/node/commit/a523482cca)] - **test**: enhance test-common.js (Rich Trott) [#11433](https://github.com/nodejs/node/pull/11433)
+* [[`1d86a9f5eb`](https://github.com/nodejs/node/commit/1d86a9f5eb)] - **test**: fix over-dependence on native promise impl (Ali Ijaz Sheikh) [#11437](https://github.com/nodejs/node/pull/11437)
+* [[`b457f38e68`](https://github.com/nodejs/node/commit/b457f38e68)] - **test**: add coverage for utf8CheckIncomplete() (xiaoyu) [#11419](https://github.com/nodejs/node/pull/11419)
+* [[`ca1bae6f3e`](https://github.com/nodejs/node/commit/ca1bae6f3e)] - **test**: remove unused args and comparison fix (Alexander) [#11396](https://github.com/nodejs/node/pull/11396)
+* [[`8ee236f85a`](https://github.com/nodejs/node/commit/8ee236f85a)] - **test**: improve crypto coverage (樋口 彰) [#11279](https://github.com/nodejs/node/pull/11279)
+* [[`add762550c`](https://github.com/nodejs/node/commit/add762550c)] - **test**: consolidate buffer.read() in a file (larissayvette) [#11297](https://github.com/nodejs/node/pull/11297)
+* [[`e416967244`](https://github.com/nodejs/node/commit/e416967244)] - **test**: cases to querystring related to empty string (Daijiro Wachi) [#11329](https://github.com/nodejs/node/pull/11329)
+* [[`5723087cdd`](https://github.com/nodejs/node/commit/5723087cdd)] - **test**: refactor test-dgram-membership (Rich Trott) [#11388](https://github.com/nodejs/node/pull/11388)
+* [[`aea0d501d7`](https://github.com/nodejs/node/commit/aea0d501d7)] - **test**: improve message in net-connect-local-error (Rich Trott) [#11393](https://github.com/nodejs/node/pull/11393)
+* [[`82882f4e90`](https://github.com/nodejs/node/commit/82882f4e90)] - **test**: cover dgram socket close during bind case (cjihrig) [#11383](https://github.com/nodejs/node/pull/11383)
+* [[`f495389d67`](https://github.com/nodejs/node/commit/f495389d67)] - **test**: refactor test-tls-cert-chains-in-ca (Rich Trott) [#11367](https://github.com/nodejs/node/pull/11367)
+* [[`348f2ef59f`](https://github.com/nodejs/node/commit/348f2ef59f)] - **test**: improve crypto coverage (Akito Ito) [#11280](https://github.com/nodejs/node/pull/11280)
+* [[`e7978f04a4`](https://github.com/nodejs/node/commit/e7978f04a4)] - **test**: cover dgram socket close during cluster bind (cjihrig) [#11292](https://github.com/nodejs/node/pull/11292)
+* [[`66081d1ddb`](https://github.com/nodejs/node/commit/66081d1ddb)] - **test**: increase coverage of buffer (DavidCai) [#11312](https://github.com/nodejs/node/pull/11312)
+* [[`7aaa960f4c`](https://github.com/nodejs/node/commit/7aaa960f4c)] - **test, url**: synchronize WPT url tests (Joyee Cheung)
+* [[`506a1cb03f`](https://github.com/nodejs/node/commit/506a1cb03f)] - **timer,domain**: maintain order of timer callbacks (John Barboza) [#10522](https://github.com/nodejs/node/pull/10522)
+* [[`4e327708a9`](https://github.com/nodejs/node/commit/4e327708a9)] - **(SEMVER-MINOR)** **tls**: new tls.TLSSocket() supports sec ctx options (Sam Roberts) [#11005](https://github.com/nodejs/node/pull/11005)
+* [[`f37ab7968e`](https://github.com/nodejs/node/commit/f37ab7968e)] - **tls**: do not crash on STARTTLS when OCSP requested (Fedor Indutny) [#10706](https://github.com/nodejs/node/pull/10706)
+* [[`5f94ff6231`](https://github.com/nodejs/node/commit/5f94ff6231)] - **tls**: avoid potentially deoptimizing use of arguments (James M Snell) [#11357](https://github.com/nodejs/node/pull/11357)
+* [[`0934a27c75`](https://github.com/nodejs/node/commit/0934a27c75)] - **tools**: enable unicode-bom ESLint rule (Anna Henningsen) [#11479](https://github.com/nodejs/node/pull/11479)
+* [[`eea2eb9111`](https://github.com/nodejs/node/commit/eea2eb9111)] - **tools**: enable one-var-declaration-per-line ESLint rule (Michaël Zasso) [#11462](https://github.com/nodejs/node/pull/11462)
+* [[`5b5dca9076`](https://github.com/nodejs/node/commit/5b5dca9076)] - **tools**: suggest python2 command in configure (Roman Reiss) [#11375](https://github.com/nodejs/node/pull/11375)
+* [[`d9d541d564`](https://github.com/nodejs/node/commit/d9d541d564)] - **tools,doc**: enable changelogs for items (Anna Henningsen) [#11489](https://github.com/nodejs/node/pull/11489)
+* [[`4ee9220565`](https://github.com/nodejs/node/commit/4ee9220565)] - **tty**: avoid oob warning in TTYWrap::GetWindowSize() (Dmitry Tsvettsikh) [#11454](https://github.com/nodejs/node/pull/11454)
+* [[`5f10827248`](https://github.com/nodejs/node/commit/5f10827248)] - **url**: fix handling of ? in URLSearchParams creation (Timothy Gu) [#11372](https://github.com/nodejs/node/pull/11372)
+* [[`72da362d6e`](https://github.com/nodejs/node/commit/72da362d6e)] - **url**: fix file state clarification in binding (Daijiro Wachi) [#11123](https://github.com/nodejs/node/pull/11123)
+* [[`4366ab539f`](https://github.com/nodejs/node/commit/4366ab539f)] - **url**: implement URL.prototype.toJSON (Michaël Zasso) [#11236](https://github.com/nodejs/node/pull/11236)
+* [[`8dbd562590`](https://github.com/nodejs/node/commit/8dbd562590)] - **url**: fix surrogate handling in encodeAuth() (Timothy Gu)
+* [[`c25c16cc1b`](https://github.com/nodejs/node/commit/c25c16cc1b)] - **url**: add urlSearchParams.sort() (Timothy Gu)
+* [[`d8cb65aa6e`](https://github.com/nodejs/node/commit/d8cb65aa6e)] - **url, test**: synchronize WPT url tests for file URL (Daijiro Wachi) [#11123](https://github.com/nodejs/node/pull/11123)
+* [[`237db9c497`](https://github.com/nodejs/node/commit/237db9c497)] - **util**: cleanup internalUtil.deprecate (James M Snell) [#11450](https://github.com/nodejs/node/pull/11450)
+* [[`95bee8f202`](https://github.com/nodejs/node/commit/95bee8f202)] - **util**: eliminate unnecessary exports (James M Snell) [#11451](https://github.com/nodejs/node/pull/11451)
+* [[`3bdac54e67`](https://github.com/nodejs/node/commit/3bdac54e67)] - **util**: use ES2015+ Object.is to check negative zero (Shinnosuke Watanabe) [#11332](https://github.com/nodejs/node/pull/11332)
+* [[`3d133ebd3d`](https://github.com/nodejs/node/commit/3d133ebd3d)] - **util, debugger**: remove internalUtil.error (James M Snell) [#11448](https://github.com/nodejs/node/pull/11448)
+* [[`f55c628b2a`](https://github.com/nodejs/node/commit/f55c628b2a)] - **vm**: refactor vm module (James M Snell) [#11392](https://github.com/nodejs/node/pull/11392)
+
## 2017-02-21, Version 7.6.0 (Current), @italoacasas
diff --git a/benchmark/doc_img/compare-boxplot.png b/doc/guides/doc_img/compare-boxplot.png
similarity index 100%
rename from benchmark/doc_img/compare-boxplot.png
rename to doc/guides/doc_img/compare-boxplot.png
diff --git a/benchmark/doc_img/scatter-plot.png b/doc/guides/doc_img/scatter-plot.png
similarity index 100%
rename from benchmark/doc_img/scatter-plot.png
rename to doc/guides/doc_img/scatter-plot.png
diff --git a/doc/guides/using-internal-errors.md b/doc/guides/using-internal-errors.md
new file mode 100644
index 00000000000000..9f8634dc233b9e
--- /dev/null
+++ b/doc/guides/using-internal-errors.md
@@ -0,0 +1,141 @@
+# Using the internal/errors.js Module
+
+## What is internal/errors.js
+
+The `require('internal/errors')` module is an internal-only module that can be
+used to produce `Error`, `TypeError` and `RangeError` instances that use a
+static, permanent error code and an optionally parameterized message.
+
+The intent of the module is to allow errors provided by Node.js to be assigned a
+permanent identifier. Without a permanent identifier, userland code may need to
+inspect error messages to distinguish one error from another. An unfortunate
+result of that practice is that changes to error messages result in broken code
+in the ecosystem. For that reason, Node.js has considered error message changes
+to be breaking changes. By providing a permanent identifier for a specific
+error, we reduce the need for userland code to inspect error messages.
+
+*Note*: Switching an existing error to use the `internal/errors` module must be
+considered a `semver-major` change. However, once using `internal/errors`,
+changes to `internal/errors` error messages will be handled as `semver-minor`
+or `semver-patch`.
+
+## Using internal/errors.js
+
+The `internal/errors` module exposes three custom `Error` classes that
+are intended to replace existing `Error` objects within the Node.js source.
+
+For instance, an existing `Error` such as:
+
+```js
+ var err = new TypeError('Expected string received ' + type);
+```
+
+Can be replaced by first adding a new error key into the `internal/errors.js`
+file:
+
+```js
+E('FOO', 'Expected string received %s');
+```
+
+Then replacing the existing `new TypeError` in the code:
+
+```js
+ const errors = require('internal/errors');
+ // ...
+ var err = new errors.TypeError('FOO', type);
+```
+
+## Adding new errors
+
+New static error codes are added by modifying the `internal/errors.js` file
+and appending the new error codes to the end using the utility `E()` method.
+
+```js
+E('EXAMPLE_KEY1', 'This is the error value');
+E('EXAMPLE_KEY2', (a, b) => `${a} ${b}`);
+```
+
+The first argument passed to `E()` is the static identifier. The second
+argument is either a String with optional `util.format()` style replacement
+tags (e.g. `%s`, `%d`), or a function returning a String. Any additional
+arguments passed to the `errors.message()` function (which is used by the
+`errors.Error`, `errors.TypeError` and `errors.RangeError` classes) will be
+used to format the error message.
+
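+For example, with a hypothetical key (the name below is illustrative only and
+not an actual Node.js error code), a function-valued message receives the
+additional arguments directly:
+
+```js
+// Registered in internal/errors.js (hypothetical key):
+E('EXAMPLE_INVALID_NAME', (name) => `"${name}" is not a valid name`);
+
+// Used elsewhere in lib/:
+const errors = require('internal/errors');
+const err = new errors.TypeError('EXAMPLE_INVALID_NAME', 'foo');
+// err.message === '"foo" is not a valid name'
+// err.code === 'EXAMPLE_INVALID_NAME'
+```
+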
+## Documenting new errors
+
+Whenever a new static error code is added and used, corresponding documentation
+for the error code should be added to the `doc/api/errors.md` file. This will
+give users a place to go to easily look up the meaning of individual error
+codes.
+
+## API
+
+### Class: errors.Error(key[, args...])
+
+* `key` {String} The static error identifier
+* `args...` {Any} Zero or more optional arguments
+
+```js
+const errors = require('internal/errors');
+
+var arg1 = 'foo';
+var arg2 = 'bar';
+const myError = new errors.Error('KEY', arg1, arg2);
+throw myError;
+```
+
+The specific error message for the `myError` instance will depend on the
+associated value of `KEY` (see "Adding new errors").
+
+The `myError` object will have a `code` property equal to the `key` and a
+`name` property equal to `Error[${key}]`.
+
+### Class: errors.TypeError(key[, args...])
+
+* `key` {String} The static error identifier
+* `args...` {Any} Zero or more optional arguments
+
+```js
+const errors = require('internal/errors');
+
+var arg1 = 'foo';
+var arg2 = 'bar';
+const myError = new errors.TypeError('KEY', arg1, arg2);
+throw myError;
+```
+
+The specific error message for the `myError` instance will depend on the
+associated value of `KEY` (see "Adding new errors").
+
+The `myError` object will have a `code` property equal to the `key` and a
+`name` property equal to `TypeError[${key}]`.
+
+### Class: errors.RangeError(key[, args...])
+
+* `key` {String} The static error identifier
+* `args...` {Any} Zero or more optional arguments
+
+```js
+const errors = require('internal/errors');
+
+var arg1 = 'foo';
+var arg2 = 'bar';
+const myError = new errors.RangeError('KEY', arg1, arg2);
+throw myError;
+```
+
+The specific error message for the `myError` instance will depend on the
+associated value of `KEY` (see "Adding new errors").
+
+The `myError` object will have a `code` property equal to the `key` and a
+`name` property equal to `RangeError[${key}]`.
+
+### Method: errors.message(key, args)
+
+* `key` {String} The static error identifier
+* `args` {Array} Zero or more optional arguments passed as an Array
+* Returns: {String}
+
+Returns the formatted error message string for the given `key`.
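+
+As a sketch only (the key below is hypothetical and would first have to be
+registered with `E()`), note that `args` is always passed as an Array:
+
+```js
+const errors = require('internal/errors');
+
+// Assuming E('EXAMPLE_GREETING', 'Hello %s') has been declared:
+const msg = errors.message('EXAMPLE_GREETING', ['world']);
+// msg === 'Hello world'
+```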
diff --git a/doc/guides/writing-and-running-benchmarks.md b/doc/guides/writing-and-running-benchmarks.md
new file mode 100644
index 00000000000000..a20f321b7c2408
--- /dev/null
+++ b/doc/guides/writing-and-running-benchmarks.md
@@ -0,0 +1,427 @@
+# How to Write and Run Benchmarks in Node.js Core
+
+## Table of Contents
+
+* [Prerequisites](#prerequisites)
+ * [HTTP Benchmark Requirements](#http-benchmark-requirements)
+ * [Benchmark Analysis Requirements](#benchmark-analysis-requirements)
+* [Running benchmarks](#running-benchmarks)
+ * [Running individual benchmarks](#running-individual-benchmarks)
+ * [Running all benchmarks](#running-all-benchmarks)
+ * [Comparing Node.js versions](#comparing-nodejs-versions)
+ * [Comparing parameters](#comparing-parameters)
+* [Creating a benchmark](#creating-a-benchmark)
+ * [Basics of a benchmark](#basics-of-a-benchmark)
+ * [Creating an HTTP benchmark](#creating-an-http-benchmark)
+
+## Prerequisites
+
+Basic Unix tools are required for some benchmarks.
+[Git for Windows][git-for-windows] includes Git Bash and the necessary tools,
+which need to be included in the global Windows `PATH`.
+
+### HTTP Benchmark Requirements
+
+Most of the HTTP benchmarks require a benchmarker to be installed. This can be
+either [`wrk`][wrk] or [`autocannon`][autocannon].
+
+`Autocannon` is a Node.js script that can be installed using
+`npm install -g autocannon`. It will use the Node.js executable that is in the
+path; hence, if you want to compare two HTTP benchmark runs, make sure that
+the Node.js version in the path is not altered.
+
+`wrk` may be available through your preferred package manager. If not, you can
+easily build it [from source][wrk] via `make`.
+
+By default `wrk` will be used as the benchmarker. If it is not available,
+`autocannon` will be used in its place. When creating an HTTP benchmark you
+can specify which benchmarker should be used. You can force a specific
+benchmarker to be used by providing it as an argument, e.g.:
+
+`node benchmark/run.js --set benchmarker=autocannon http`
+
+`node benchmark/http/simple.js benchmarker=autocannon`
+
+### Benchmark Analysis Requirements
+
+To analyze the results, `R` should be installed. Check your package manager or
+download it from https://www.r-project.org/.
+
+The R packages `ggplot2` and `plyr` are also used and can be installed using
+the R REPL.
+
+```R
+$ R
+install.packages("ggplot2")
+install.packages("plyr")
+```
+
+In the event you get a message that you need to select a CRAN mirror first,
+you can specify a mirror by adding the `repo` parameter.
+
+If we used the "http://cran.us.r-project.org" mirror, it could look something
+like this:
+
+```R
+install.packages("ggplot2", repo="http://cran.us.r-project.org")
+```
+
+Of course, use the mirror that suits your location.
+A list of mirrors is [located here](https://cran.r-project.org/mirrors.html).
+
+## Running benchmarks
+
+### Running individual benchmarks
+
+This can be useful for debugging a benchmark or doing a quick performance
+measure. However, it does not provide the statistical information needed to
+draw any conclusions about the performance.
+
+Individual benchmarks can be executed by simply running the benchmark script
+with node.
+
+```console
+$ node benchmark/buffers/buffer-tostring.js
+
+buffers/buffer-tostring.js n=10000000 len=0 arg=true: 62710590.393305704
+buffers/buffer-tostring.js n=10000000 len=1 arg=true: 9178624.591787899
+buffers/buffer-tostring.js n=10000000 len=64 arg=true: 7658962.8891432695
+buffers/buffer-tostring.js n=10000000 len=1024 arg=true: 4136904.4060201733
+buffers/buffer-tostring.js n=10000000 len=0 arg=false: 22974354.231509723
+buffers/buffer-tostring.js n=10000000 len=1 arg=false: 11485945.656765845
+buffers/buffer-tostring.js n=10000000 len=64 arg=false: 8718280.70650129
+buffers/buffer-tostring.js n=10000000 len=1024 arg=false: 4103857.0726124765
+```
+
+Each line represents a single benchmark with parameters specified as
+`${variable}=${value}`. Each configuration combination is executed in a separate
+process. This ensures that benchmark results aren't affected by the execution
+order due to v8 optimizations. **The last number is the rate of operations
+measured in ops/sec (higher is better).**
+
+Furthermore, you can specify a subset of the configurations by setting them in
+the process arguments:
+
+```console
+$ node benchmark/buffers/buffer-tostring.js len=1024
+
+buffers/buffer-tostring.js n=10000000 len=1024 arg=true: 3498295.68561504
+buffers/buffer-tostring.js n=10000000 len=1024 arg=false: 3783071.1678948295
+```
+
+### Running all benchmarks
+
+Similar to running individual benchmarks, a group of benchmarks can be executed
+by using the `run.js` tool. To see how to use this script,
+run `node benchmark/run.js`. Again, this does not provide the statistical
+information needed to draw any conclusions.
+
+```console
+$ node benchmark/run.js arrays
+
+arrays/var-int.js
+arrays/var-int.js n=25 type=Array: 71.90148040747789
+arrays/var-int.js n=25 type=Buffer: 92.89648382795582
+...
+
+arrays/zero-float.js
+arrays/zero-float.js n=25 type=Array: 75.46208316171496
+arrays/zero-float.js n=25 type=Buffer: 101.62785630273159
+...
+
+arrays/zero-int.js
+arrays/zero-int.js n=25 type=Array: 72.31023859816062
+arrays/zero-int.js n=25 type=Buffer: 90.49906662339653
+...
+```
+
+It is possible to execute more groups by adding extra process arguments.
+```console
+$ node benchmark/run.js arrays buffers
+```
+
+### Comparing Node.js versions
+
+To compare the effect of a new Node.js version use the `compare.js` tool. This
+will run each benchmark multiple times, making it possible to calculate
+statistics on the performance measures. To see how to use this script,
+run `node benchmark/compare.js`.
+
+As an example of how to check for a possible performance improvement, the
+[#5134](https://github.com/nodejs/node/pull/5134) pull request will be used.
+This pull request _claims_ to improve the performance of the
+`string_decoder` module.
+
+First build two versions of Node.js, one from the master branch (here called
+`./node-master`) and another with the pull request applied (here called
+`./node-pr-5134`).
+
+The `compare.js` tool will then produce a csv file with the benchmark results.
+
+```console
+$ node benchmark/compare.js --old ./node-master --new ./node-pr-5134 string_decoder > compare-pr-5134.csv
+```
+
+For analyzing the benchmark results, use the `compare.R` tool.
+
+```console
+$ cat compare-pr-5134.csv | Rscript benchmark/compare.R
+
+ improvement confidence p.value
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=ascii 12.46 % *** 1.165345e-04
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=base64-ascii 24.70 % *** 1.820615e-15
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=base64-utf8 23.60 % *** 2.105625e-12
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=utf8 14.04 % *** 1.291105e-07
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=128 encoding=ascii 6.70 % * 2.928003e-02
+...
+```
+
+In the output, _improvement_ is the relative improvement of the new version;
+hopefully this is positive. _confidence_ tells whether there is enough
+statistical evidence to validate the _improvement_. If there is enough
+evidence, there will be at least one star (`*`); more stars are better.
+**However, if there are no stars, you shouldn't draw any conclusions based on
+the _improvement_.** Sometimes this is fine; for example, if you are expecting
+no improvements, then there shouldn't be any stars.
+
+**A word of caution:** Statistics is not a foolproof tool. If a benchmark shows
+a statistically significant difference, there is a 5% risk that this
+difference doesn't actually exist. For a single benchmark this is not an
+issue. But when considering 20 benchmarks, it's normal that one of them
+will show significance when it shouldn't. A possible solution is to instead
+consider at least two stars (`**`) as the threshold; in that case the risk
+is 1%. If three stars (`***`) are used as the threshold, the risk is 0.1%.
+However, this may require more benchmark runs to obtain (the number of runs
+can be set with `--runs`).
+
+_For the statistically minded, the R script performs an [independent/unpaired
+2-group t-test][t-test], with the null hypothesis that the performance is the
+same for both versions. The confidence field will show a star if the p-value
+is less than `0.05`._
+
+The `compare.R` tool can also produce a box plot by using the `--plot filename`
+option. In this case there are 48 different benchmark combinations, so you
+may want to filter the csv file. This can be done while benchmarking using the
+`--set` parameter (e.g. `--set encoding=ascii`) or by filtering results
+afterwards using tools such as `sed` or `grep`. In the `sed` case be sure to
+keep the first line since that contains the header information.
+
+```console
+$ cat compare-pr-5134.csv | sed '1p;/encoding=ascii/!d' | Rscript benchmark/compare.R --plot compare-plot.png
+
+ improvement confidence p.value
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=ascii 12.46 % *** 1.165345e-04
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=128 encoding=ascii 6.70 % * 2.928003e-02
+string_decoder/string-decoder.js n=250000 chunk=1024 inlen=32 encoding=ascii 7.47 % *** 5.780583e-04
+string_decoder/string-decoder.js n=250000 chunk=16 inlen=1024 encoding=ascii 8.94 % *** 1.788579e-04
+string_decoder/string-decoder.js n=250000 chunk=16 inlen=128 encoding=ascii 10.54 % *** 4.016172e-05
+...
+```
+
+
+### Comparing parameters
+
+It can be useful to compare the performance for different parameters, for
+example to analyze the time complexity.
+
+To do this, use the `scatter.js` tool. This will run a benchmark multiple
+times and generate a csv with the results. To see how to use this script,
+run `node benchmark/scatter.js`.
+
+```console
+$ node benchmark/scatter.js benchmark/string_decoder/string-decoder.js > scatter.csv
+```
+
+After generating the csv, a comparison table can be created using the
+`scatter.R` tool. Even more usefully, it creates an actual scatter plot when
+using the `--plot filename` option.
+
+```console
+$ cat scatter.csv | Rscript benchmark/scatter.R --xaxis chunk --category encoding --plot scatter-plot.png --log
+
+aggregating variable: inlen
+
+chunk encoding mean confidence.interval
+ 16 ascii 1111933.3 221502.48
+ 16 base64-ascii 167508.4 33116.09
+ 16 base64-utf8 122666.6 25037.65
+ 16 utf8 783254.8 159601.79
+ 64 ascii 2623462.9 399791.36
+ 64 base64-ascii 462008.3 85369.45
+ 64 base64-utf8 420108.4 85612.05
+ 64 utf8 1358327.5 235152.03
+ 256 ascii 3730343.4 371530.47
+ 256 base64-ascii 663281.2 80302.73
+ 256 base64-utf8 632911.7 81393.07
+ 256 utf8 1554216.9 236066.53
+ 1024 ascii 4399282.0 186436.46
+ 1024 base64-ascii 730426.6 63806.12
+ 1024 base64-utf8 680954.3 68076.33
+ 1024 utf8 1554832.5 237532.07
+```
+
+Because the scatter plot can only show two variables (in this case _chunk_ and
+_encoding_), the rest is aggregated. Sometimes aggregating is a problem; this
+can be solved by filtering. This can be done while benchmarking using the
+`--set` parameter (e.g. `--set encoding=ascii`) or by filtering results
+afterwards using tools such as `sed` or `grep`. In the `sed` case be
+sure to keep the first line since that contains the header information.
+
+```console
+$ cat scatter.csv | sed -E '1p;/([^,]+, ){3}128,/!d' | Rscript benchmark/scatter.R --xaxis chunk --category encoding --plot scatter-plot.png --log
+
+chunk encoding mean confidence.interval
+ 16 ascii 701285.96 21233.982
+ 16 base64-ascii 107719.07 3339.439
+ 16 base64-utf8 72966.95 2438.448
+ 16 utf8 475340.84 17685.450
+ 64 ascii 2554105.08 87067.132
+ 64 base64-ascii 330120.32 8551.707
+ 64 base64-utf8 249693.19 8990.493
+ 64 utf8 1128671.90 48433.862
+ 256 ascii 4841070.04 181620.768
+ 256 base64-ascii 849545.53 29931.656
+ 256 base64-utf8 809629.89 33773.496
+ 256 utf8 1489525.15 49616.334
+ 1024 ascii 4931512.12 165402.805
+ 1024 base64-ascii 863933.22 27766.982
+ 1024 base64-utf8 827093.97 24376.522
+ 1024 utf8 1487176.43 50128.721
+```
+
+
+## Creating a benchmark
+
+### Basics of a benchmark
+
+All benchmarks use the `require('../common.js')` module. This contains the
+`createBenchmark(main, configs[, options])` method, which will set up your
+benchmark.
+
+The arguments of `createBenchmark` are:
+
+* `main` {Function} The benchmark function,
+ where the code running operations and controlling timers should go
+* `configs` {Object} The benchmark parameters. `createBenchmark` will run all
+ possible combinations of these parameters, unless specified otherwise.
+ Each configuration is a property with an array of possible values.
+ Note that the configuration values can only be strings or numbers.
+* `options` {Object} The benchmark options. At the moment only the `flags`
+ option for specifying command line flags is supported.
+
+`createBenchmark` returns a `bench` object, which is used for timing
+the runtime of the benchmark. Run `bench.start()` after the initialization
+and `bench.end(n)` when the benchmark is done. `n` is the number of operations
+you performed in the benchmark.
+
+The benchmark script will be run twice:
+
+The first pass will configure the benchmark with the combination of
+parameters specified in `configs`, and WILL NOT run the `main` function.
+In this pass, no flags will be used except the ones passed directly on the
+command line when you run the benchmark.
+
+In the second pass, the `main` function will be run, and the process
+will be launched with:
+
+* The flags you've passed into `createBenchmark` (the third argument)
+* The flags in the command that you run this benchmark with
+
+Beware that any code outside the `main` function will be run twice
+in different processes. This could be troublesome if the code
+outside the `main` function has side effects. In general, prefer putting
+the code inside the `main` function if it's more than just declarations.
+
+```js
+'use strict';
+const common = require('../common.js');
+const SlowBuffer = require('buffer').SlowBuffer;
+
+const configs = {
+ // Number of operations, specified here so they show up in the report.
+ // Most benchmarks just use one value for all runs.
+ n: [1024],
+ type: ['fast', 'slow'], // Custom configurations
+ size: [16, 128, 1024] // Custom configurations
+};
+
+const options = {
+ // Add --expose-internals if you want to require internal modules in main
+ flags: ['--zero-fill-buffers']
+};
+
+// main and configs are required, options is optional.
+const bench = common.createBenchmark(main, configs, options);
+
+// Note that any code outside main will be run twice,
+// in different processes, with different command line arguments.
+
+function main(conf) {
+ // You will only get the flags that you have passed to createBenchmark
+ // earlier when main is run. If you want to benchmark the internal modules,
+ // require them here. For example:
+ // const URL = require('internal/url').URL
+
+ // Start the timer
+ bench.start();
+
+ // Do operations here
+ const BufferConstructor = conf.type === 'fast' ? Buffer : SlowBuffer;
+
+ for (let i = 0; i < conf.n; i++) {
+ new BufferConstructor(conf.size);
+ }
+
+ // End the timer, pass in the number of operations
+ bench.end(conf.n);
+}
+```
+
+### Creating an HTTP benchmark
+
+The `bench` object returned by `createBenchmark` implements an
+`http(options, callback)` method. It can be used to run an external tool to
+benchmark HTTP servers.
+
+```js
+'use strict';
+
+const common = require('../common.js');
+
+const bench = common.createBenchmark(main, {
+ kb: [64, 128, 256, 1024],
+ connections: [100, 500]
+});
+
+function main(conf) {
+ const http = require('http');
+ const len = conf.kb * 1024;
+ const chunk = Buffer.alloc(len, 'x');
+ const server = http.createServer(function(req, res) {
+ res.end(chunk);
+ });
+
+ server.listen(common.PORT, function() {
+ bench.http({
+ connections: conf.connections,
+ }, function() {
+ server.close();
+ });
+ });
+}
+```
+
+Supported `options` keys are listed below (a usage sketch follows the list):
+* `port` - defaults to `common.PORT`
+* `path` - defaults to `/`
+* `connections` - number of concurrent connections to use, defaults to 100
+* `duration` - duration of the benchmark in seconds, defaults to 10
+* `benchmarker` - benchmarker to use, defaults to
+`common.default_http_benchmarker`
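+
+A minimal sketch that overrides these defaults (the values below are arbitrary
+and assume the keys behave as documented above):
+
+```js
+bench.http({
+  path: '/index.html',
+  connections: 50,
+  duration: 5,
+  benchmarker: 'autocannon'
+}, function() {
+  server.close();
+});
+```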
+
+[autocannon]: https://github.com/mcollina/autocannon
+[wrk]: https://github.com/wg/wrk
+[t-test]: https://en.wikipedia.org/wiki/Student%27s_t-test#Equal_or_unequal_sample_sizes.2C_unequal_variances
+[git-for-windows]: http://git-scm.com/download/win
diff --git a/doc/guides/writing-tests.md b/doc/guides/writing-tests.md
index 36cf04fc546825..4f226bfdb2580c 100644
--- a/doc/guides/writing-tests.md
+++ b/doc/guides/writing-tests.md
@@ -231,8 +231,12 @@ assert.throws(
For performance considerations, we only use a selected subset of ES.Next
features in JavaScript code in the `lib` directory. However, when writing
-tests, it is encouraged to use ES.Next features that have already landed
-in the ECMAScript specification. For example:
+tests, for the ease of backporting, it is encouraged to use those ES.Next
+features that can be used directly without a flag in
+[all maintained branches](https://github.com/nodejs/lts). You can check
+[node.green](http://node.green) for all available features in each release.
+
+For example:
* `let` and `const` over `var`
* Template literals over string concatenation
diff --git a/doc/node.1 b/doc/node.1
index ffe72e1d022aa6..026485d3920923 100644
--- a/doc/node.1
+++ b/doc/node.1
@@ -243,6 +243,16 @@ asynchronous when outputting to a TTY on platforms which support async stdio.
Setting this will void any guarantee that stdio will not be interleaved or
dropped at program exit. \fBAvoid use.\fR
+.TP
+.BR OPENSSL_CONF = \fIfile\fR
+Load an OpenSSL configuration file on startup. Among other uses, this can be
+used to enable FIPS-compliant crypto if Node.js is built with
+\fB./configure \-\-openssl\-fips\fR.
+
+If the
+\fB\-\-openssl\-config\fR
+command line option is used, the environment variable is ignored.
+
.TP
.BR SSL_CERT_DIR = \fIdir\fR
If \fB\-\-use\-openssl\-ca\fR is enabled, this overrides and sets OpenSSL's directory
diff --git a/lib/_debug_agent.js b/lib/_debug_agent.js
index eedca7ef5843bb..c731ca374b5e87 100644
--- a/lib/_debug_agent.js
+++ b/lib/_debug_agent.js
@@ -1,5 +1,9 @@
'use strict';
+process.emitWarning(
+ 'node --debug is deprecated. Please use node --inspect instead.',
+ 'DeprecationWarning');
+
const assert = require('assert');
const net = require('net');
const util = require('util');
diff --git a/lib/_debugger.js b/lib/_debugger.js
index d3bf1f9ffa2dab..090c9e5dc650e1 100644
--- a/lib/_debugger.js
+++ b/lib/_debugger.js
@@ -1,6 +1,5 @@
'use strict';
-const internalUtil = require('internal/util');
const util = require('util');
const path = require('path');
const net = require('net');
@@ -11,6 +10,11 @@ const inherits = util.inherits;
const assert = require('assert');
const spawn = require('child_process').spawn;
const Buffer = require('buffer').Buffer;
+const prefix = `(${process.release.name}:${process.pid}) `;
+
+function error(msg) {
+ console.error(`${prefix}${msg}`);
+}
exports.start = function(argv, stdin, stdout) {
argv || (argv = process.argv.slice(2));
@@ -32,8 +36,8 @@ exports.start = function(argv, stdin, stdout) {
stdin.resume();
process.on('uncaughtException', function(e) {
- internalUtil.error('There was an internal error in Node\'s debugger. ' +
- 'Please report this bug.');
+ error('There was an internal error in Node\'s debugger. ' +
+ 'Please report this bug.');
console.error(e.message);
console.error(e.stack);
if (interface_.child) interface_.child.kill();
@@ -521,7 +525,7 @@ Client.prototype.mirrorObject = function(handle, depth, cb) {
cb = cb || function() {};
this.reqLookup(propertyRefs, function(err, res) {
if (err) {
- internalUtil.error('problem with reqLookup');
+ error('problem with reqLookup');
cb(null, handle);
return;
}
@@ -1672,7 +1676,7 @@ Interface.prototype.trySpawn = function(cb) {
process._debugProcess(pid);
} catch (e) {
if (e.code === 'ESRCH') {
- internalUtil.error(`Target process: ${pid} doesn't exist.`);
+ error(`Target process: ${pid} doesn't exist.`);
process.exit(1);
}
throw e;
@@ -1741,7 +1745,7 @@ Interface.prototype.trySpawn = function(cb) {
function connectError() {
// If it's failed to connect 10 times then print failed message
if (connectionAttempts >= 10) {
- internalUtil.error(' failed to connect, please retry');
+ error(' failed to connect, please retry');
process.exit(1);
}
setTimeout(attemptConnect, 500);
diff --git a/lib/_http_agent.js b/lib/_http_agent.js
index eebdb242463b5d..ace5923b4516ee 100644
--- a/lib/_http_agent.js
+++ b/lib/_http_agent.js
@@ -121,7 +121,7 @@ Agent.prototype.addRequest = function addRequest(req, options) {
}
options = util._extend({}, options);
- options = util._extend(options, this.options);
+ util._extend(options, this.options);
if (!options.servername) {
options.servername = options.host;
@@ -176,7 +176,7 @@ Agent.prototype.addRequest = function addRequest(req, options) {
Agent.prototype.createSocket = function createSocket(req, options, cb) {
var self = this;
options = util._extend({}, options);
- options = util._extend(options, self.options);
+ util._extend(options, self.options);
if (!options.servername) {
options.servername = options.host;
diff --git a/lib/_http_outgoing.js b/lib/_http_outgoing.js
index 163a00ff9878f4..05c6ecda55dece 100644
--- a/lib/_http_outgoing.js
+++ b/lib/_http_outgoing.js
@@ -32,6 +32,10 @@ const automaticHeaders = {
};
+// Used to store headers returned by getHeaders()
+function OutgoingHeaders() {}
+OutgoingHeaders.prototype = Object.create(null);
+
var dateCache;
function utcDate() {
if (!dateCache) {
@@ -392,6 +396,37 @@ OutgoingMessage.prototype.getHeader = function getHeader(name) {
};
+// Returns an array of the names of the current outgoing headers.
+OutgoingMessage.prototype.getHeaderNames = function getHeaderNames() {
+ return (this._headers ? Object.keys(this._headers) : []);
+};
+
+
+// Returns a shallow copy of the current outgoing headers.
+OutgoingMessage.prototype.getHeaders = function getHeaders() {
+ const headers = this._headers;
+ const ret = new OutgoingHeaders();
+ if (headers) {
+ const keys = Object.keys(headers);
+ for (var i = 0; i < keys.length; ++i) {
+ const key = keys[i];
+ const val = headers[key];
+ ret[key] = val;
+ }
+ }
+ return ret;
+};
+
+
+OutgoingMessage.prototype.hasHeader = function hasHeader(name) {
+ if (typeof name !== 'string') {
+ throw new TypeError('"name" argument must be a string');
+ }
+
+ return !!(this._headers && this._headers[name.toLowerCase()]);
+};
+
+
OutgoingMessage.prototype.removeHeader = function removeHeader(name) {
if (arguments.length < 1) {
throw new Error('"name" argument is required for removeHeader(name)');
diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js
index 83a0f109939dc5..ebd36519cf1525 100644
--- a/lib/_tls_wrap.js
+++ b/lib/_tls_wrap.js
@@ -110,6 +110,13 @@ function requestOCSP(self, hello, ctx, cb) {
if (!ctx)
ctx = self.server._sharedCreds;
+
+ // TLS socket is using a `net.Server` instead of a tls.TLSServer.
+ // Some TLS properties like `server._sharedCreds` will not be present
+ if (!ctx)
+ return cb(null);
+
+ // TODO(indutny): eventually disallow raw `SecureContext`
if (ctx.context)
ctx = ctx.context;
@@ -294,9 +301,9 @@ var proxiedMethods = [
// Proxy HandleWrap, PipeWrap and TCPWrap methods
proxiedMethods.forEach(function(name) {
- tls_wrap.TLSWrap.prototype[name] = function methodProxy() {
+ tls_wrap.TLSWrap.prototype[name] = function methodProxy(...args) {
if (this._parent[name])
- return this._parent[name].apply(this._parent, arguments);
+ return this._parent[name].apply(this._parent, args);
};
});
@@ -344,7 +351,7 @@ TLSSocket.prototype._wrapHandle = function(wrap) {
// Wrap socket's handle
var context = options.secureContext ||
options.credentials ||
- tls.createSecureContext();
+ tls.createSecureContext(options);
res = tls_wrap.wrap(handle._externalStream,
context.context,
!!options.isServer);
@@ -978,19 +985,15 @@ function normalizeConnectArgs(listArgs) {
// the host/port/path args that it knows about, not the tls options.
// This means that options.host overrides a host arg.
if (listArgs[1] !== null && typeof listArgs[1] === 'object') {
- options = util._extend(options, listArgs[1]);
+ util._extend(options, listArgs[1]);
} else if (listArgs[2] !== null && typeof listArgs[2] === 'object') {
- options = util._extend(options, listArgs[2]);
+ util._extend(options, listArgs[2]);
}
return (cb) ? [options, cb] : [options];
}
-exports.connect = function(/* [port,] [host,] [options,] [cb] */) {
- const argsLen = arguments.length;
- var args = new Array(argsLen);
- for (var i = 0; i < argsLen; i++)
- args[i] = arguments[i];
+exports.connect = function(...args /* [port,] [host,] [options,] [cb] */) {
args = normalizeConnectArgs(args);
var options = args[0];
var cb = args[1];
diff --git a/lib/assert.js b/lib/assert.js
index 7f69ba0c60e476..6a09e3f74bce00 100644
--- a/lib/assert.js
+++ b/lib/assert.js
@@ -1,7 +1,3 @@
-// http://wiki.commonjs.org/wiki/Unit_Testing/1.0
-//
-// THIS IS NOT TESTED NOR LIKELY TO WORK OUTSIDE V8!
-//
// Originally from narwhal.js (http://narwhaljs.org)
// Copyright (c) 2009 Thomas Robinson <280north.com>
//
@@ -213,7 +209,7 @@ function _deepEqual(actual, expected, strict, memos) {
}
function isArguments(object) {
- return Object.prototype.toString.call(object) == '[object Arguments]';
+ return Object.prototype.toString.call(object) === '[object Arguments]';
}
function objEquiv(a, b, strict, actualVisitedObjects) {
@@ -294,11 +290,12 @@ assert.notStrictEqual = function notStrictEqual(actual, expected, message) {
};
function expectedException(actual, expected) {
- if (!actual || !expected) {
+ // actual is guaranteed to be an Error object, but we need to check expected.
+ if (!expected) {
return false;
}
- if (Object.prototype.toString.call(expected) == '[object RegExp]') {
+ if (Object.prototype.toString.call(expected) === '[object RegExp]') {
return expected.test(actual);
}
diff --git a/lib/buffer.js b/lib/buffer.js
index 3f47677dd56469..21e24c2980f5e6 100644
--- a/lib/buffer.js
+++ b/lib/buffer.js
@@ -426,10 +426,10 @@ Object.defineProperty(Buffer.prototype, 'offset', {
});
-function slowToString(encoding, start, end) {
+function slowToString(buf, encoding, start, end) {
var loweredCase = false;
- // No need to verify that "this.length <= MAX_UINT32" since it's a read-only
+ // No need to verify that "buf.length <= MAX_UINT32" since it's a read-only
// property of a typed array.
// This behaves neither like String nor Uint8Array in that we set start/end
@@ -438,13 +438,13 @@ function slowToString(encoding, start, end) {
// Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.
if (start === undefined || start < 0)
start = 0;
- // Return early if start > this.length. Done here to prevent potential uint32
+ // Return early if start > buf.length. Done here to prevent potential uint32
// coercion fail below.
- if (start > this.length)
+ if (start > buf.length)
return '';
- if (end === undefined || end > this.length)
- end = this.length;
+ if (end === undefined || end > buf.length)
+ end = buf.length;
if (end <= 0)
return '';
@@ -461,27 +461,27 @@ function slowToString(encoding, start, end) {
while (true) {
switch (encoding) {
case 'hex':
- return this.hexSlice(start, end);
+ return buf.hexSlice(start, end);
case 'utf8':
case 'utf-8':
- return this.utf8Slice(start, end);
+ return buf.utf8Slice(start, end);
case 'ascii':
- return this.asciiSlice(start, end);
+ return buf.asciiSlice(start, end);
case 'latin1':
case 'binary':
- return this.latin1Slice(start, end);
+ return buf.latin1Slice(start, end);
case 'base64':
- return this.base64Slice(start, end);
+ return buf.base64Slice(start, end);
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
- return this.ucs2Slice(start, end);
+ return buf.ucs2Slice(start, end);
default:
if (loweredCase)
@@ -493,12 +493,12 @@ function slowToString(encoding, start, end) {
}
-Buffer.prototype.toString = function() {
+Buffer.prototype.toString = function(encoding, start, end) {
let result;
if (arguments.length === 0) {
result = this.utf8Slice(0, this.length);
} else {
- result = slowToString.apply(this, arguments);
+ result = slowToString(this, encoding, start, end);
}
if (result === undefined)
throw new Error('"toString()" failed');
diff --git a/lib/child_process.js b/lib/child_process.js
index 76ec6d3d0f9437..2ae9e3671d23e9 100644
--- a/lib/child_process.js
+++ b/lib/child_process.js
@@ -112,7 +112,8 @@ exports.exec = function(command /*, options, callback*/) {
exports.execFile = function(file /*, args, options, callback*/) {
- var args = [], callback;
+ var args = [];
+ var callback;
var options = {
encoding: 'utf8',
timeout: 0,
@@ -132,7 +133,7 @@ exports.execFile = function(file /*, args, options, callback*/) {
}
if (pos < arguments.length && typeof arguments[pos] === 'object') {
- options = util._extend(options, arguments[pos++]);
+ util._extend(options, arguments[pos++]);
} else if (pos < arguments.length && arguments[pos] == null) {
pos++;
}
@@ -196,9 +197,7 @@ exports.execFile = function(file /*, args, options, callback*/) {
stderr = Buffer.concat(_stderr);
}
- if (ex) {
- // Will be handled later
- } else if (code === 0 && signal === null) {
+ if (!ex && code === 0 && signal === null) {
callback(null, stdout, stderr);
return;
}
diff --git a/lib/dgram.js b/lib/dgram.js
index 43efe4665b15df..69bcf5de10343c 100644
--- a/lib/dgram.js
+++ b/lib/dgram.js
@@ -131,7 +131,7 @@ function replaceHandle(self, newHandle) {
self._handle = newHandle;
}
-Socket.prototype.bind = function(port_ /*, address, callback*/) {
+Socket.prototype.bind = function(port_, address_ /*, callback*/) {
let port = port_;
this._healthCheck();
@@ -141,7 +141,7 @@ Socket.prototype.bind = function(port_ /*, address, callback*/) {
this._bindState = BIND_STATE_BINDING;
- if (typeof arguments[arguments.length - 1] === 'function')
+ if (arguments.length && typeof arguments[arguments.length - 1] === 'function')
this.once('listening', arguments[arguments.length - 1]);
if (port instanceof UDP) {
@@ -158,7 +158,7 @@ Socket.prototype.bind = function(port_ /*, address, callback*/) {
exclusive = !!port.exclusive;
port = port.port;
} else {
- address = typeof arguments[1] === 'function' ? '' : arguments[1];
+ address = typeof address_ === 'function' ? '' : address_;
exclusive = false;
}
diff --git a/lib/dns.js b/lib/dns.js
index cbb994b8f271de..cfa04ed192050f 100644
--- a/lib/dns.js
+++ b/lib/dns.js
@@ -56,15 +56,12 @@ function errnoException(err, syscall, hostname) {
// callback.immediately = true;
// }
function makeAsync(callback) {
- return function asyncCallback() {
+ return function asyncCallback(...args) {
if (asyncCallback.immediately) {
// The API already returned, we can invoke the callback immediately.
- callback.apply(null, arguments);
+ callback.apply(null, args);
} else {
- var args = new Array(arguments.length + 1);
- args[0] = callback;
- for (var i = 0; i < arguments.length; ++i)
- args[i + 1] = arguments[i];
+ args.unshift(callback);
process.nextTick.apply(null, args);
}
};
diff --git a/lib/events.js b/lib/events.js
index 000fa98d5a88c5..ab167bb2fbd44b 100644
--- a/lib/events.js
+++ b/lib/events.js
@@ -153,9 +153,11 @@ EventEmitter.prototype.emit = function emit(type) {
if (domain) {
if (!er)
er = new Error('Uncaught, unspecified "error" event');
- er.domainEmitter = this;
- er.domain = domain;
- er.domainThrown = false;
+ if (typeof er === 'object' && er !== null) {
+ er.domainEmitter = this;
+ er.domain = domain;
+ er.domainThrown = false;
+ }
domain.emit('error', er);
} else if (er instanceof Error) {
throw er; // Unhandled 'error' event
diff --git a/lib/fs.js b/lib/fs.js
index b90dcf9e9487ee..c30de937c2afed 100644
--- a/lib/fs.js
+++ b/lib/fs.js
@@ -132,7 +132,7 @@ function isFd(path) {
}
// Static method to set the stats properties on a Stats object.
-fs.Stats = function(
+function Stats(
dev,
mode,
nlink,
@@ -161,7 +161,8 @@ fs.Stats = function(
this.mtime = new Date(mtim_msec);
this.ctime = new Date(ctim_msec);
this.birthtime = new Date(birthtim_msec);
-};
+}
+fs.Stats = Stats;
// Create a C++ binding to the function which creates a Stats object.
binding.FSInitialize(fs.Stats);
@@ -263,7 +264,7 @@ fs.existsSync = function(path) {
try {
handleError((path = getPathFromURL(path)));
nullCheck(path);
- binding.stat(pathModule._makeLong(path));
+ binding.stat(pathModule._makeLong(path), statValues);
return true;
} catch (e) {
return false;
@@ -938,20 +939,33 @@ fs.stat = function(path, callback) {
binding.stat(pathModule._makeLong(path), req);
};
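+// Filled in synchronously by the stat()/lstat()/fstat() bindings;
+// statsFromValues() below turns the 14 numeric slots into a Stats object.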
+const statValues = new Float64Array(14);
+function statsFromValues() {
+ return new Stats(statValues[0], statValues[1], statValues[2], statValues[3],
+ statValues[4], statValues[5],
+ statValues[6] < 0 ? undefined : statValues[6], statValues[7],
+ statValues[8], statValues[9] < 0 ? undefined : statValues[9],
+ statValues[10], statValues[11], statValues[12],
+ statValues[13]);
+}
+
fs.fstatSync = function(fd) {
- return binding.fstat(fd);
+ binding.fstat(fd, statValues);
+ return statsFromValues();
};
fs.lstatSync = function(path) {
handleError((path = getPathFromURL(path)));
nullCheck(path);
- return binding.lstat(pathModule._makeLong(path));
+ binding.lstat(pathModule._makeLong(path), statValues);
+ return statsFromValues();
};
fs.statSync = function(path) {
handleError((path = getPathFromURL(path)));
nullCheck(path);
- return binding.stat(pathModule._makeLong(path));
+ binding.stat(pathModule._makeLong(path), statValues);
+ return statsFromValues();
};
fs.readlink = function(path, options, callback) {
diff --git a/lib/internal/bootstrap_node.js b/lib/internal/bootstrap_node.js
index ccabffd8a6e342..12faa6bf00afb1 100644
--- a/lib/internal/bootstrap_node.js
+++ b/lib/internal/bootstrap_node.js
@@ -38,12 +38,15 @@
_process.setup_hrtime();
_process.setup_cpuUsage();
+ _process.setupMemoryUsage();
_process.setupConfig(NativeModule._source);
NativeModule.require('internal/process/warning').setup();
NativeModule.require('internal/process/next_tick').setup();
NativeModule.require('internal/process/stdio').setup();
_process.setupKillAndExit();
_process.setupSignalHandlers();
+ if (global.__coverage__)
+ NativeModule.require('internal/process/write-coverage').setup();
// Do not initialize channel in debugger agent, it deletes env variable
// and the main thread won't see it.
diff --git a/lib/internal/child_process.js b/lib/internal/child_process.js
index 3017cd5889a037..da6ed289e8c587 100644
--- a/lib/internal/child_process.js
+++ b/lib/internal/child_process.js
@@ -230,12 +230,16 @@ util.inherits(ChildProcess, EventEmitter);
function flushStdio(subprocess) {
- if (subprocess.stdio == null) return;
- subprocess.stdio.forEach(function(stream, fd, stdio) {
+ const stdio = subprocess.stdio;
+
+ if (stdio == null) return;
+
+ for (var i = 0; i < stdio.length; i++) {
+ const stream = stdio[i];
if (!stream || !stream.readable || stream._readableState.readableListening)
- return;
+ continue;
stream.resume();
- });
+ }
}
@@ -268,6 +272,7 @@ ChildProcess.prototype.spawn = function(options) {
const self = this;
var ipc;
var ipcFd;
+ var i;
// If no `stdio` option was given - use default
var stdio = options.stdio || 'pipe';
@@ -302,11 +307,12 @@ ChildProcess.prototype.spawn = function(options) {
if (err !== uv.UV_ENOENT) return err;
} else if (err) {
// Close all opened fds on error
- stdio.forEach(function(stdio) {
- if (stdio.type === 'pipe') {
- stdio.handle.close();
+ for (i = 0; i < stdio.length; i++) {
+ const stream = stdio[i];
+ if (stream.type === 'pipe') {
+ stream.handle.close();
}
- });
+ }
this._handle.close();
this._handle = null;
@@ -315,27 +321,29 @@ ChildProcess.prototype.spawn = function(options) {
this.pid = this._handle.pid;
- stdio.forEach(function(stdio, i) {
- if (stdio.type === 'ignore') return;
+ for (i = 0; i < stdio.length; i++) {
+ const stream = stdio[i];
+ if (stream.type === 'ignore') continue;
- if (stdio.ipc) {
+ if (stream.ipc) {
self._closesNeeded++;
- return;
+ continue;
}
- if (stdio.handle) {
+ if (stream.handle) {
// when i === 0 - we're dealing with stdin
// (which is the only one writable pipe)
- stdio.socket = createSocket(self.pid !== 0 ? stdio.handle : null, i > 0);
+ stream.socket = createSocket(self.pid !== 0 ?
+ stream.handle : null, i > 0);
if (i > 0 && self.pid !== 0) {
self._closesNeeded++;
- stdio.socket.on('close', function() {
+ stream.socket.on('close', function() {
maybeClose(self);
});
}
}
- });
+ }
this.stdin = stdio.length >= 1 && stdio[0].socket !== undefined ?
stdio[0].socket : null;
@@ -407,6 +415,24 @@ ChildProcess.prototype.unref = function() {
if (this._handle) this._handle.unref();
};
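+// Keeps the target's IPC channel ref'ed while at least one consumer holds a
+// reference and emits 'unref' once the count drops back to zero.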
+class Control extends EventEmitter {
+ constructor(channel) {
+ super();
+ this.channel = channel;
+ this.refs = 0;
+ }
+ ref() {
+ if (++this.refs === 1) {
+ this.channel.ref();
+ }
+ }
+ unref() {
+ if (--this.refs === 0) {
+ this.channel.unref();
+ this.emit('unref');
+ }
+ }
+}
function setupChannel(target, channel) {
target.channel = channel;
@@ -421,24 +447,7 @@ function setupChannel(target, channel) {
target._handleQueue = null;
target._pendingHandle = null;
- const control = new class extends EventEmitter {
- constructor() {
- super();
- this.channel = channel;
- this.refs = 0;
- }
- ref() {
- if (++this.refs === 1) {
- this.channel.ref();
- }
- }
- unref() {
- if (--this.refs === 0) {
- this.channel.unref();
- this.emit('unref');
- }
- }
- }();
+ const control = new Control(channel);
var decoder = new StringDecoder('utf8');
var jsonBuffer = '';
@@ -796,11 +805,11 @@ function _validateStdio(stdio, sync) {
}
// Defaults
- if (stdio === null || stdio === undefined) {
+ if (stdio == null) {
stdio = i < 3 ? 'pipe' : 'ignore';
}
- if (stdio === null || stdio === 'ignore') {
+ if (stdio === 'ignore') {
acc.push({type: 'ignore'});
} else if (stdio === 'pipe' || typeof stdio === 'number' && stdio < 0) {
var a = {
@@ -886,7 +895,7 @@ function getSocketList(type, slave, key) {
function maybeClose(subprocess) {
subprocess._closesGot++;
- if (subprocess._closesGot == subprocess._closesNeeded) {
+ if (subprocess._closesGot === subprocess._closesNeeded) {
subprocess.emit('close', subprocess.exitCode, subprocess.signalCode);
}
}
diff --git a/lib/internal/cluster/master.js b/lib/internal/cluster/master.js
index 9d5062f5427ffa..af421a04183ae8 100644
--- a/lib/internal/cluster/master.js
+++ b/lib/internal/cluster/master.js
@@ -48,8 +48,8 @@ cluster.setupMaster = function(options) {
execArgv: process.execArgv,
silent: false
};
- settings = util._extend(settings, cluster.settings);
- settings = util._extend(settings, options || {});
+ util._extend(settings, cluster.settings);
+ util._extend(settings, options || {});
// Tell V8 to write profile data for each process to a separate file.
// Without --logfile=v8-%p.log, everything ends up in a single, unusable
@@ -110,12 +110,12 @@ function createWorkerProcess(id, env) {
var execArgv = cluster.settings.execArgv.slice();
var debugPort = 0;
- workerEnv = util._extend(workerEnv, env);
+ util._extend(workerEnv, env);
workerEnv.NODE_UNIQUE_ID = '' + id;
for (var i = 0; i < execArgv.length; i++) {
const match = execArgv[i].match(
- /^(--inspect|--debug|--debug-(brk|port))(=\d+)?$/
+ /^(--inspect|--inspect-(brk|port)|--debug|--debug-(brk|port))(=\d+)?$/
);
if (match) {
diff --git a/lib/internal/errors.js b/lib/internal/errors.js
new file mode 100644
index 00000000000000..f2376f70371c60
--- /dev/null
+++ b/lib/internal/errors.js
@@ -0,0 +1,88 @@
+'use strict';
+
+// The whole point behind this internal module is to allow Node.js to no
+// longer be forced to treat every error message change as a semver-major
+// change. The NodeError classes here all expose a `code` property whose
+// value statically and permanently identifies the error. While the error
+// message may change, the code should not.
+
+const kCode = Symbol('code');
+const messages = new Map();
+
+var assert, util;
+function lazyAssert() {
+ if (!assert)
+ assert = require('assert');
+ return assert;
+}
+
+function lazyUtil() {
+ if (!util)
+ util = require('util');
+ return util;
+}
+
+function makeNodeError(Base) {
+ return class NodeError extends Base {
+ constructor(key, ...args) {
+ super(message(key, args));
+ this[kCode] = key;
+ Error.captureStackTrace(this, NodeError);
+ }
+
+ get name() {
+ return `${super.name}[${this[kCode]}]`;
+ }
+
+ get code() {
+ return this[kCode];
+ }
+ };
+}
+
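+// Looks up the registered message for `key` and formats it with `args`,
+// either via util.format() or by calling the registered message function.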
+function message(key, args) {
+ const assert = lazyAssert();
+ assert.strictEqual(typeof key, 'string');
+ const util = lazyUtil();
+ const msg = messages.get(key);
+ assert(msg, `An invalid error message key was used: ${key}.`);
+ let fmt = util.format;
+ if (typeof msg === 'function') {
+ fmt = msg;
+ } else {
+ if (args === undefined || args.length === 0)
+ return msg;
+ args.unshift(msg);
+ }
+ return String(fmt.apply(null, args));
+}
+
+// Utility function for registering the error codes. Only used here. Exported
+// *only* to allow for testing.
+function E(sym, val) {
+ messages.set(sym, typeof val === 'function' ? val : String(val));
+}
+
+module.exports = exports = {
+ message,
+ Error: makeNodeError(Error),
+ TypeError: makeNodeError(TypeError),
+ RangeError: makeNodeError(RangeError),
+ E // This is exported only to facilitate testing.
+};
+
+// To declare an error message, use the E(sym, val) function above. The sym
+// must be an upper case string. The val can be either a function or a string.
+// The return value of the function must be a string.
+// Examples:
+// E('EXAMPLE_KEY1', 'This is the error value');
+// E('EXAMPLE_KEY2', (a, b) => `${a} ${b}`);
+//
+// Once an error code has been assigned, the code itself MUST NOT change and
+// any given error code must never be reused to identify a different error.
+//
+// Any error code added here should also be added to the documentation
+//
+// Note: Please try to keep these in alphabetical order
+E('ERR_ASSERTION', (msg) => msg);
+// Add new errors from here...
diff --git a/lib/internal/process.js b/lib/internal/process.js
index 441fb722edb153..5334d8c58bc0a2 100644
--- a/lib/internal/process.js
+++ b/lib/internal/process.js
@@ -11,6 +11,7 @@ function lazyConstants() {
exports.setup_cpuUsage = setup_cpuUsage;
exports.setup_hrtime = setup_hrtime;
+exports.setupMemoryUsage = setupMemoryUsage;
exports.setupConfig = setupConfig;
exports.setupKillAndExit = setupKillAndExit;
exports.setupSignalHandlers = setupSignalHandlers;
@@ -73,7 +74,9 @@ function setup_cpuUsage() {
};
}
-
+// The 3 entries filled in by the original process.hrtime contain
+// the upper/lower 32 bits of the seconds part of the value,
+// and the remaining nanoseconds of the value.
function setup_hrtime() {
const _hrtime = process.hrtime;
const hrValues = new Uint32Array(3);
@@ -98,6 +101,20 @@ function setup_hrtime() {
};
}
+function setupMemoryUsage() {
+ const memoryUsage_ = process.memoryUsage;
+ const memValues = new Float64Array(4);
+
+ process.memoryUsage = function memoryUsage() {
+ memoryUsage_(memValues);
+ return {
+ rss: memValues[0],
+ heapTotal: memValues[1],
+ heapUsed: memValues[2],
+ external: memValues[3]
+ };
+ };
+}
function setupConfig(_source) {
// NativeModule._source
@@ -122,7 +139,7 @@ function setupConfig(_source) {
const oldV8BreakIterator = Intl.v8BreakIterator;
const des = Object.getOwnPropertyDescriptor(Intl, 'v8BreakIterator');
des.value = require('internal/util').deprecate(function v8BreakIterator() {
- if (processConfig.hasSmallICU && !process.icu_data_dir) {
+ if (processConfig.hasSmallICU && !processConfig.icuDataDir) {
// Intl.v8BreakIterator() would crash w/ fatal error, so throw instead.
throw new Error('v8BreakIterator: full ICU data not installed. ' +
'See https://github.com/nodejs/node/wiki/Intl');
@@ -131,8 +148,6 @@ function setupConfig(_source) {
}, 'Intl.v8BreakIterator is deprecated and will be removed soon.');
Object.defineProperty(Intl, 'v8BreakIterator', des);
}
- // Don’t let icu_data_dir leak through.
- delete process.icu_data_dir;
}
diff --git a/lib/internal/process/next_tick.js b/lib/internal/process/next_tick.js
index f27ef622a96e6a..ad635aaf494b33 100644
--- a/lib/internal/process/next_tick.js
+++ b/lib/internal/process/next_tick.js
@@ -1,5 +1,11 @@
'use strict';
+// This value is used to prevent the nextTickQueue from becoming too
+// large and causing the process to run out of memory. When this value
+// is reached, the nextTickQueue array will be shortened (see tickDone
+// for details).
+const kMaxCallbacksPerLoop = 1e4;
+
exports.setup = setupNextTick;
function setupNextTick() {
@@ -96,7 +102,7 @@ function setupNextTick() {
// callback invocation with small numbers of arguments to avoid the
// performance hit associated with using `fn.apply()`
_combinedTickCallback(args, callback);
- if (1e4 < tickInfo[kIndex])
+ if (kMaxCallbacksPerLoop < tickInfo[kIndex])
tickDone();
}
tickDone();
@@ -120,7 +126,7 @@ function setupNextTick() {
// callback invocation with small numbers of arguments to avoid the
// performance hit associated with using `fn.apply()`
_combinedTickCallback(args, callback);
- if (1e4 < tickInfo[kIndex])
+ if (kMaxCallbacksPerLoop < tickInfo[kIndex])
tickDone();
if (domain)
domain.exit();
diff --git a/lib/internal/process/write-coverage.js b/lib/internal/process/write-coverage.js
new file mode 100644
index 00000000000000..6bbc59a6e981be
--- /dev/null
+++ b/lib/internal/process/write-coverage.js
@@ -0,0 +1,46 @@
+'use strict';
+const process = require('process');
+const path = require('path');
+const fs = require('fs');
+const mkdirSync = fs.mkdirSync;
+const writeFileSync = fs.writeFileSync;
+
+var isWritingCoverage = false;
+function writeCoverage() {
+ if (isWritingCoverage || !global.__coverage__) {
+ return;
+ }
+ isWritingCoverage = true;
+
+ const dirname = path.join(path.dirname(process.execPath), '.coverage');
+ const filename = `coverage-${process.pid}-${Date.now()}.json`;
+ try {
+ mkdirSync(dirname);
+ } catch (err) {
+ if (err.code !== 'EEXIST') {
+ console.error(err);
+ return;
+ }
+ }
+
+ const target = path.join(dirname, filename);
+ const coverageInfo = JSON.stringify(global.__coverage__);
+ try {
+ writeFileSync(target, coverageInfo);
+ } catch (err) {
+ console.error(err);
+ }
+}
+
+function setup() {
+ const reallyReallyExit = process.reallyExit;
+
+ process.reallyExit = function(code) {
+ writeCoverage();
+ reallyReallyExit(code);
+ };
+
+ process.on('exit', writeCoverage);
+}
+
+exports.setup = setup;
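write-coverage.js hooks both process.reallyExit and the 'exit' event so that whatever the instrumented lib/ files collected in global.__coverage__ is flushed to .coverage/coverage-<pid>-<timestamp>.json next to the node binary. A hedged sketch of collecting those files afterwards; a real setup would merge them with istanbul-merge as the Makefile does, and the naive merge here is only a demo:

```js
// Collect the per-process coverage files written by write-coverage.js.
const fs = require('fs');
const path = require('path');

const dir = path.join(path.dirname(process.execPath), '.coverage');
let merged = {};
for (const name of fs.readdirSync(dir)) {
  if (!/^coverage-\d+-\d+\.json$/.test(name))
    continue;
  const data = JSON.parse(fs.readFileSync(path.join(dir, name), 'utf8'));
  merged = Object.assign(merged, data);
}
console.log(`${Object.keys(merged).length} instrumented files`);
```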
diff --git a/lib/internal/querystring.js b/lib/internal/querystring.js
new file mode 100644
index 00000000000000..2f8d77d3e9d2e7
--- /dev/null
+++ b/lib/internal/querystring.js
@@ -0,0 +1,15 @@
+'use strict';
+
+const hexTable = new Array(256);
+for (var i = 0; i < 256; ++i)
+ hexTable[i] = '%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase();
+
+// Instantiating this is faster than explicitly calling `Object.create(null)`
+// to get a "clean" empty object (tested with v8 v4.9).
+function StorageObject() {}
+StorageObject.prototype = Object.create(null);
+
+module.exports = {
+ hexTable,
+ StorageObject
+};
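StorageObject exists because its instances sit on a prototype created with Object.create(null), so stored keys can never collide with Object.prototype members, while `new StorageObject()` is cheaper than calling Object.create(null) for every parsed object. Observable behaviour:

```js
// Behaviour of a StorageObject-style null-prototype store.
function StorageObject() {}
StorageObject.prototype = Object.create(null);

const obj = new StorageObject();
obj.__proto__ = 'value';          // no inherited setter, so this is an own property
console.log(obj.__proto__);       // 'value'
console.log(obj.hasOwnProperty);  // undefined -- nothing from Object.prototype
console.log('toString' in obj);   // false
```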
diff --git a/lib/internal/url.js b/lib/internal/url.js
index b79cc2303592a2..630c69d2cbaf81 100644
--- a/lib/internal/url.js
+++ b/lib/internal/url.js
@@ -1,6 +1,7 @@
'use strict';
const util = require('util');
+const { StorageObject } = require('internal/querystring');
const binding = process.binding('url');
const context = Symbol('context');
const cannotBeBase = Symbol('cannot-be-base');
@@ -22,9 +23,6 @@ const IteratorPrototype = Object.getPrototypeOf(
Object.getPrototypeOf([][Symbol.iterator]())
);
-function StorageObject() {}
-StorageObject.prototype = Object.create(null);
-
class OpaqueOrigin {
toString() {
return 'null';
@@ -97,12 +95,11 @@ function onParseComplete(flags, protocol, username, password,
ctx.query = query;
ctx.fragment = fragment;
ctx.host = host;
- if (this[searchParams]) { // invoked from href setter
- initSearchParams(this[searchParams], query);
- } else {
- this[searchParams] = new URLSearchParams(query);
+ if (!this[searchParams]) { // invoked from URL constructor
+ this[searchParams] = new URLSearchParams();
+ this[searchParams][context] = this;
}
- this[searchParams][context] = this;
+ initSearchParams(this[searchParams], query);
}
// Reused by URL constructor and URL#href setter.
@@ -525,75 +522,17 @@ Object.defineProperties(URL.prototype, {
binding.parse(hash, binding.kFragment, null, ctx,
onParseHashComplete.bind(this));
}
- }
-});
-
-const hexTable = new Array(256);
-
-for (var i = 0; i < 256; ++i)
- hexTable[i] = '%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase();
-function encodeAuth(str) {
- // faster encodeURIComponent alternative for encoding auth uri components
- var out = '';
- var lastPos = 0;
- for (var i = 0; i < str.length; ++i) {
- var c = str.charCodeAt(i);
-
- // These characters do not need escaping:
- // ! - . _ ~
- // ' ( ) * :
- // digits
- // alpha (uppercase)
- // alpha (lowercase)
- if (c === 0x21 || c === 0x2D || c === 0x2E || c === 0x5F || c === 0x7E ||
- (c >= 0x27 && c <= 0x2A) ||
- (c >= 0x30 && c <= 0x3A) ||
- (c >= 0x41 && c <= 0x5A) ||
- (c >= 0x61 && c <= 0x7A)) {
- continue;
- }
-
- if (i - lastPos > 0)
- out += str.slice(lastPos, i);
-
- lastPos = i + 1;
-
- // Other ASCII characters
- if (c < 0x80) {
- out += hexTable[c];
- continue;
- }
-
- // Multi-byte characters ...
- if (c < 0x800) {
- out += hexTable[0xC0 | (c >> 6)] + hexTable[0x80 | (c & 0x3F)];
- continue;
- }
- if (c < 0xD800 || c >= 0xE000) {
- out += hexTable[0xE0 | (c >> 12)] +
- hexTable[0x80 | ((c >> 6) & 0x3F)] +
- hexTable[0x80 | (c & 0x3F)];
- continue;
+ },
+ toJSON: {
+ writable: true,
+ enumerable: true,
+ configurable: true,
+ // eslint-disable-next-line func-name-matching
+ value: function toJSON() {
+ return this[kFormat]({});
}
- // Surrogate pair
- ++i;
- var c2;
- if (i < str.length)
- c2 = str.charCodeAt(i) & 0x3FF;
- else
- c2 = 0;
- c = 0x10000 + (((c & 0x3FF) << 10) | c2);
- out += hexTable[0xF0 | (c >> 18)] +
- hexTable[0x80 | ((c >> 12) & 0x3F)] +
- hexTable[0x80 | ((c >> 6) & 0x3F)] +
- hexTable[0x80 | (c & 0x3F)];
}
- if (lastPos === 0)
- return str;
- if (lastPos < str.length)
- return out + str.slice(lastPos);
- return out;
-}
+});
function update(url, params) {
if (!url)
@@ -724,6 +663,35 @@ class URLSearchParams {
}
}
+// for merge sort
+function merge(out, start, mid, end, lBuffer, rBuffer) {
+ const sizeLeft = mid - start;
+ const sizeRight = end - mid;
+ var l, r, o;
+
+ for (l = 0; l < sizeLeft; l++)
+ lBuffer[l] = out[start + l];
+ for (r = 0; r < sizeRight; r++)
+ rBuffer[r] = out[mid + r];
+
+ l = 0;
+ r = 0;
+ o = start;
+ while (l < sizeLeft && r < sizeRight) {
+ if (lBuffer[l] <= rBuffer[r]) {
+ out[o++] = lBuffer[l++];
+ out[o++] = lBuffer[l++];
+ } else {
+ out[o++] = rBuffer[r++];
+ out[o++] = rBuffer[r++];
+ }
+ }
+ while (l < sizeLeft)
+ out[o++] = lBuffer[l++];
+ while (r < sizeRight)
+ out[o++] = rBuffer[r++];
+}
+
defineIDLClass(URLSearchParams.prototype, 'URLSearchParams', {
append(name, value) {
if (!this || !(this instanceof URLSearchParams)) {
@@ -855,6 +823,51 @@ defineIDLClass(URLSearchParams.prototype, 'URLSearchParams', {
update(this[context], this);
},
+ sort() {
+ const a = this[searchParams];
+ const len = a.length;
+ if (len <= 2) {
+ return;
+ }
+
+ // arbitrary number found through testing
+ if (len < 100) {
+ // Simple stable in-place insertion sort
+ // Derived from v8/src/js/array.js
+ for (var i = 2; i < len; i += 2) {
+ var curKey = a[i];
+ var curVal = a[i + 1];
+ var j;
+ for (j = i - 2; j >= 0; j -= 2) {
+ if (a[j] > curKey) {
+ a[j + 2] = a[j];
+ a[j + 3] = a[j + 1];
+ } else {
+ break;
+ }
+ }
+ a[j + 2] = curKey;
+ a[j + 3] = curVal;
+ }
+ } else {
+ // Bottom-up iterative stable merge sort
+ const lBuffer = new Array(len);
+ const rBuffer = new Array(len);
+ for (var step = 2; step < len; step *= 2) {
+ for (var start = 0; start < len - 2; start += 2 * step) {
+ var mid = start + step;
+ var end = mid + step;
+ end = end < len ? end : len;
+ if (mid > end)
+ continue;
+ merge(a, start, mid, end, lBuffer, rBuffer);
+ }
+ }
+ }
+
+ update(this[context], this);
+ },
+
// https://heycam.github.io/webidl/#es-iterators
// Define entries here rather than [Symbol.iterator] as the function name
// must be set to `entries`.
@@ -1138,6 +1151,6 @@ exports.URL = URL;
exports.URLSearchParams = URLSearchParams;
exports.domainToASCII = domainToASCII;
exports.domainToUnicode = domainToUnicode;
-exports.encodeAuth = encodeAuth;
exports.urlToOptions = urlToOptions;
exports.formatSymbol = kFormat;
+exports.searchParamsSymbol = searchParams;
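The user-visible additions in this file are URL#toJSON() and URLSearchParams#sort(), a stable sort by name where values for equal names keep their relative order. Assuming the WHATWG URL class is reached via require('url').URL as in current builds:

```js
// URLSearchParams#sort() and URL#toJSON() as added above.
const { URL } = require('url');

const u = new URL('https://example.org/?b=2&a=1&b=1');
u.searchParams.sort();
console.log(u.search);           // '?a=1&b=2&b=1' -- stable: b=2 stays ahead of b=1
console.log(JSON.stringify(u));  // '"https://example.org/?a=1&b=2&b=1"' via toJSON()
```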
diff --git a/lib/internal/util.js b/lib/internal/util.js
index 42a0922d304970..b4a938f9c22a89 100644
--- a/lib/internal/util.js
+++ b/lib/internal/util.js
@@ -6,32 +6,10 @@ const prefix = `(${process.release.name}:${process.pid}) `;
const kArrowMessagePrivateSymbolIndex = binding['arrow_message_private_symbol'];
const kDecoratedPrivateSymbolIndex = binding['decorated_private_symbol'];
-exports.getHiddenValue = binding.getHiddenValue;
-exports.setHiddenValue = binding.setHiddenValue;
-
// The `buffer` module uses this. Defining it here instead of in the public
// `util` module makes it accessible without having to `require('util')` there.
exports.customInspectSymbol = Symbol('util.inspect.custom');
-// All the internal deprecations have to use this function only, as this will
-// prepend the prefix to the actual message.
-exports.deprecate = function(fn, msg) {
- return exports._deprecate(fn, msg);
-};
-
-exports.error = function(msg) {
- const fmt = `${prefix}${msg}`;
- if (arguments.length > 1) {
- const args = new Array(arguments.length);
- args[0] = fmt;
- for (var i = 1; i < arguments.length; ++i)
- args[i] = arguments[i];
- console.error.apply(console, args);
- } else {
- console.error(fmt);
- }
-};
-
exports.trace = function(msg) {
console.trace(`${prefix}${msg}`);
};
@@ -39,11 +17,11 @@ exports.trace = function(msg) {
// Mark that a method should not be used.
// Returns a modified function which warns once by default.
// If --no-deprecation is set, then it is a no-op.
-exports._deprecate = function(fn, msg) {
+exports.deprecate = function deprecate(fn, msg, code) {
// Allow for deprecating things in the process of starting up.
if (global.process === undefined) {
return function() {
- return exports._deprecate(fn, msg).apply(this, arguments);
+ return exports.deprecate(fn, msg, code).apply(this, arguments);
};
}
@@ -77,14 +55,14 @@ exports._deprecate = function(fn, msg) {
exports.decorateErrorStack = function decorateErrorStack(err) {
if (!(exports.isError(err) && err.stack) ||
- exports.getHiddenValue(err, kDecoratedPrivateSymbolIndex) === true)
+ binding.getHiddenValue(err, kDecoratedPrivateSymbolIndex) === true)
return;
- const arrow = exports.getHiddenValue(err, kArrowMessagePrivateSymbolIndex);
+ const arrow = binding.getHiddenValue(err, kArrowMessagePrivateSymbolIndex);
if (arrow) {
err.stack = arrow + err.stack;
- exports.setHiddenValue(err, kDecoratedPrivateSymbolIndex, true);
+ binding.setHiddenValue(err, kDecoratedPrivateSymbolIndex, true);
}
};
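With _deprecate folded into deprecate (plus an optional code argument), internal callers and lib/util.js now share one wrapper that warns once and then delegates. A minimal sketch of that shape, not the internal implementation:

```js
// Warn-once wrapper in the spirit of internal/util deprecate().
function deprecate(fn, msg) {
  let warned = false;
  return function deprecated(...args) {
    if (!warned) {
      warned = true;
      process.emitWarning(msg, 'DeprecationWarning');
    }
    return fn.apply(this, args);
  };
}

const oldApi = deprecate(() => 42, 'oldApi() is deprecated, use newApi() instead');
console.log(oldApi());  // 42, plus a one-time DeprecationWarning on stderr
console.log(oldApi());  // 42, no second warning
```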
diff --git a/lib/net.js b/lib/net.js
index e4f97ab80debd5..2528daedc84ef5 100644
--- a/lib/net.js
+++ b/lib/net.js
@@ -146,6 +146,9 @@ function Socket(options) {
} else if (options.fd !== undefined) {
this._handle = createHandle(options.fd);
this._handle.open(options.fd);
+ // options.fd can be a string (since it is user-defined),
+ // so changing this to === would be semver-major.
+ // See: https://github.com/nodejs/node/pull/11513
if ((options.fd == 1 || options.fd == 2) &&
(this._handle instanceof Pipe) &&
process.platform === 'win32') {
@@ -1069,7 +1072,7 @@ function afterConnect(status, handle, req, readable, writable) {
self.connecting = false;
self._sockname = null;
- if (status == 0) {
+ if (status === 0) {
self.readable = readable;
self.writable = writable;
self._unrefTimer();
diff --git a/lib/os.js b/lib/os.js
index a59cc49bae8c51..6d8ebd1ca55659 100644
--- a/lib/os.js
+++ b/lib/os.js
@@ -1,12 +1,12 @@
'use strict';
const binding = process.binding('os');
+const getLoadAvg = binding.getLoadAvg;
const constants = process.binding('constants').os;
const internalUtil = require('internal/util');
const isWindows = process.platform === 'win32';
exports.hostname = binding.getHostname;
-exports.loadavg = binding.getLoadAvg;
exports.uptime = binding.getUptime;
exports.freemem = binding.getFreeMem;
exports.totalmem = binding.getTotalMem;
@@ -17,6 +17,12 @@ exports.networkInterfaces = binding.getInterfaceAddresses;
exports.homedir = binding.getHomeDirectory;
exports.userInfo = binding.getUserInfo;
+const avgValues = new Float64Array(3);
+exports.loadavg = function loadavg() {
+ getLoadAvg(avgValues);
+ return [avgValues[0], avgValues[1], avgValues[2]];
+};
+
Object.defineProperty(exports, 'constants', {
configurable: false,
enumerable: true,
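os.loadavg() now mirrors the memoryUsage() approach: one Float64Array(3) is allocated up front and the binding writes into it. The public behaviour is unchanged:

```js
// Same public API, cheaper call path.
const os = require('os');

const [one, five, fifteen] = os.loadavg();
console.log(one, five, fifteen);  // e.g. 0.42 0.37 0.31 (always 0 0 0 on Windows)
```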
diff --git a/lib/querystring.js b/lib/querystring.js
index 5ccb5fa77b320f..8360857ba4bfce 100644
--- a/lib/querystring.js
+++ b/lib/querystring.js
@@ -1,5 +1,7 @@
'use strict';
+const { Buffer } = require('buffer');
+const { StorageObject, hexTable } = require('internal/querystring');
const QueryString = module.exports = {
unescapeBuffer,
// `unescape()` is a JS global, so we need to use a different local name
@@ -14,13 +16,6 @@ const QueryString = module.exports = {
parse,
decode: parse
};
-const Buffer = require('buffer').Buffer;
-
-// This constructor is used to store parsed query string values. Instantiating
-// this is faster than explicitly calling `Object.create(null)` to get a
-// "clean" empty object (tested with v8 v4.9).
-function ParsedQueryString() {}
-ParsedQueryString.prototype = Object.create(null);
const unhexTable = [
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 0 - 15
@@ -116,10 +111,6 @@ function qsUnescape(s, decodeSpaces) {
}
-const hexTable = [];
-for (var i = 0; i < 256; ++i)
- hexTable[i] = '%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase();
-
// These characters do not need escaping when generating query strings:
// ! - . _ ~
// ' ( ) *
@@ -263,7 +254,7 @@ const defEqCodes = [61]; // =
// Parse a key/val string.
function parse(qs, sep, eq, options) {
- const obj = new ParsedQueryString();
+ const obj = new StorageObject();
if (typeof qs !== 'string' || qs.length === 0) {
return obj;
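Sharing StorageObject and hexTable with internal/querystring removes the duplicated ParsedQueryString constructor; parsed results remain prototype-less objects:

```js
// querystring.parse() results cannot collide with Object.prototype members.
const querystring = require('querystring');

const parsed = querystring.parse('toString=abc&a=1');
console.log(parsed.toString);      // 'abc' -- an own property, not the inherited method
console.log(parsed.a);             // '1'
console.log(Object.keys(parsed));  // [ 'toString', 'a' ]
```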
diff --git a/lib/readline.js b/lib/readline.js
index f1b9db46af6ee1..a571657a726d2a 100644
--- a/lib/readline.js
+++ b/lib/readline.js
@@ -427,9 +427,9 @@ Interface.prototype._tabComplete = function(lastKeypressWasTab) {
if (!maxColumns || maxColumns === Infinity) {
maxColumns = 1;
}
- var group = [], c;
+ var group = [];
for (var i = 0, compLen = completions.length; i < compLen; i++) {
- c = completions[i];
+ var c = completions[i];
if (c === '') {
handleGroup(self, group, width, maxColumns);
group = [];
diff --git a/lib/repl.js b/lib/repl.js
index 3852e24aed8da8..72fd37e0a4a08f 100644
--- a/lib/repl.js
+++ b/lib/repl.js
@@ -266,7 +266,9 @@ function REPLServer(prompt,
code = code.replace(/\n$/, '');
code = preprocess(code);
- var err, result, retry = false, input = code, wrappedErr;
+ var retry = false;
+ var input = code;
+ var err, result, wrappedErr;
// first, create the Script object to check the syntax
if (code === '\n')
diff --git a/lib/timers.js b/lib/timers.js
index 3490baa0e5730a..0784f7f1e11247 100644
--- a/lib/timers.js
+++ b/lib/timers.js
@@ -156,12 +156,19 @@ function TimersList(msecs, unrefed) {
this._timer = new TimerWrap();
this._unrefed = unrefed;
this.msecs = msecs;
+ this.nextTick = false;
}
function listOnTimeout() {
var list = this._list;
var msecs = list.msecs;
+ if (list.nextTick) {
+ list.nextTick = false;
+ process.nextTick(listOnTimeoutNT, list);
+ return;
+ }
+
debug('timeout callback %d', msecs);
var now = TimerWrap.now();
@@ -239,6 +246,14 @@ function tryOnTimeout(timer, list) {
} finally {
if (!threw) return;
+ // Postpone all later list events to next tick. We need to do this
+ // so that the events are called in the order they were created.
+ const lists = list._unrefed === true ? unrefedLists : refedLists;
+ for (var key in lists) {
+ if (key > list.msecs) {
+ lists[key].nextTick = true;
+ }
+ }
// We need to continue processing after domain error handling
// is complete, but not by using whatever domain was left over
// when the timeout threw its exception.
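The nextTick flag keeps timer callbacks firing in creation order even when an earlier callback throws and the error is handled: lists with larger msecs are deferred to the next tick rather than running out of order. Illustrative only, since the observable effect depends on a handled uncaughtException:

```js
// Later timers still run after earlier ones, even when an earlier one throws.
process.on('uncaughtException', (err) => {
  console.log('caught:', err.message);
});

const order = [];
setTimeout(() => {
  order.push('a');
  throw new Error('boom');
}, 10);
setTimeout(() => {
  order.push('b');
  console.log(order);  // [ 'a', 'b' ]
}, 20);
```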
diff --git a/lib/tty.js b/lib/tty.js
index 576144e4013064..f9b8a34e95d97a 100644
--- a/lib/tty.js
+++ b/lib/tty.js
@@ -55,7 +55,7 @@ function WriteStream(fd) {
// Ref: https://github.com/nodejs/node/pull/1771#issuecomment-119351671
this._handle.setBlocking(process.env.NODE_TTY_UNSAFE_ASYNC !== '1');
- var winSize = [];
+ var winSize = new Array(2);
var err = this._handle.getWindowSize(winSize);
if (!err) {
this.columns = winSize[0];
@@ -72,7 +72,7 @@ WriteStream.prototype.isTTY = true;
WriteStream.prototype._refreshSize = function() {
var oldCols = this.columns;
var oldRows = this.rows;
- var winSize = [];
+ var winSize = new Array(2);
var err = this._handle.getWindowSize(winSize);
if (err) {
this.emit('error', errnoException(err, 'getWindowSize'));
diff --git a/lib/url.js b/lib/url.js
index 9164d5991b7a67..845f22a44447d6 100644
--- a/lib/url.js
+++ b/lib/url.js
@@ -10,8 +10,8 @@ function importPunycode() {
const { toASCII } = importPunycode();
+const { StorageObject, hexTable } = require('internal/querystring');
const internalUrl = require('internal/url');
-const encodeAuth = internalUrl.encodeAuth;
exports.parse = urlParse;
exports.resolve = urlResolve;
exports.resolveObject = urlResolveObject;
@@ -76,12 +76,6 @@ const slashedProtocol = {
};
const querystring = require('querystring');
-// This constructor is used to store parsed query string values. Instantiating
-// this is faster than explicitly calling `Object.create(null)` to get a
-// "clean" empty object (tested with v8 v4.9).
-function ParsedQueryString() {}
-ParsedQueryString.prototype = Object.create(null);
-
function urlParse(url, parseQueryString, slashesDenoteHost) {
if (url instanceof Url) return url;
@@ -190,7 +184,7 @@ Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
}
} else if (parseQueryString) {
this.search = '';
- this.query = new ParsedQueryString();
+ this.query = new StorageObject();
}
return this;
}
@@ -380,7 +374,7 @@ Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
} else if (parseQueryString) {
// no query string, but parseQueryString still requested
this.search = '';
- this.query = new ParsedQueryString();
+ this.query = new StorageObject();
}
var firstIdx = (questionIdx !== -1 &&
@@ -959,3 +953,75 @@ function spliceOne(list, index) {
list[i] = list[k];
list.pop();
}
+
+// These characters do not need escaping:
+// ! - . _ ~
+// ' ( ) * :
+// digits
+// alpha (uppercase)
+// alpha (lowercase)
+const noEscapeAuth = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x00 - 0x0F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x10 - 0x1F
+ 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, // 0x20 - 0x2F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, // 0x30 - 0x3F
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x40 - 0x4F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, // 0x50 - 0x5F
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x60 - 0x6F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0 // 0x70 - 0x7F
+];
+
+function encodeAuth(str) {
+ // faster encodeURIComponent alternative for encoding auth uri components
+ var out = '';
+ var lastPos = 0;
+ for (var i = 0; i < str.length; ++i) {
+ var c = str.charCodeAt(i);
+
+ // ASCII
+ if (c < 0x80) {
+ if (noEscapeAuth[c] === 1)
+ continue;
+ if (lastPos < i)
+ out += str.slice(lastPos, i);
+ lastPos = i + 1;
+ out += hexTable[c];
+ continue;
+ }
+
+ if (lastPos < i)
+ out += str.slice(lastPos, i);
+
+ // Multi-byte characters ...
+ if (c < 0x800) {
+ lastPos = i + 1;
+ out += hexTable[0xC0 | (c >> 6)] + hexTable[0x80 | (c & 0x3F)];
+ continue;
+ }
+ if (c < 0xD800 || c >= 0xE000) {
+ lastPos = i + 1;
+ out += hexTable[0xE0 | (c >> 12)] +
+ hexTable[0x80 | ((c >> 6) & 0x3F)] +
+ hexTable[0x80 | (c & 0x3F)];
+ continue;
+ }
+ // Surrogate pair
+ ++i;
+ var c2;
+ if (i < str.length)
+ c2 = str.charCodeAt(i) & 0x3FF;
+ else
+ c2 = 0;
+ lastPos = i + 1;
+ c = 0x10000 + (((c & 0x3FF) << 10) | c2);
+ out += hexTable[0xF0 | (c >> 18)] +
+ hexTable[0x80 | ((c >> 12) & 0x3F)] +
+ hexTable[0x80 | ((c >> 6) & 0x3F)] +
+ hexTable[0x80 | (c & 0x3F)];
+ }
+ if (lastPos === 0)
+ return str;
+ if (lastPos < str.length)
+ return out + str.slice(lastPos);
+ return out;
+}
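encodeAuth() keeps its behaviour but moves into url.js and swaps the chain of range comparisons for a 128-entry lookup table, one array read per ASCII character. A standalone sketch of the table-driven approach; the table below only whitelists letters and digits, purely as a demo:

```js
// Table-driven percent-encoding, in the style of noEscapeAuth/hexTable above.
const hexTable = new Array(256);
for (let i = 0; i < 256; ++i)
  hexTable[i] = '%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase();

const noEscape = new Array(128).fill(0);
for (let i = 0; i < 128; ++i) {
  if ((i >= 0x30 && i <= 0x39) ||  // 0-9
      (i >= 0x41 && i <= 0x5A) ||  // A-Z
      (i >= 0x61 && i <= 0x7A))    // a-z
    noEscape[i] = 1;
}

function escapeAscii(str) {
  let out = '';
  for (let i = 0; i < str.length; ++i) {
    const c = str.charCodeAt(i);
    out += (c < 0x80 && noEscape[c]) ? str[i] : hexTable[c & 0xFF];
  }
  return out;
}

console.log(escapeAscii('user name'));  // 'user%20name'
```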
diff --git a/lib/util.js b/lib/util.js
index ca07dff94909f8..64c4c94c29a81f 100644
--- a/lib/util.js
+++ b/lib/util.js
@@ -131,7 +131,7 @@ exports.format = function(f) {
};
-exports.deprecate = internalUtil._deprecate;
+exports.deprecate = internalUtil.deprecate;
var debugs = {};
@@ -449,8 +449,10 @@ function formatValue(ctx, value, recurseTimes) {
}
}
- var base = '', empty = false, braces;
+ var base = '';
+ var empty = false;
var formatter = formatObject;
+ var braces;
// We can't compare constructors for various objects using a comparison like
// `constructor === Array` because the object could have come from a different
@@ -604,9 +606,8 @@ function formatValue(ctx, value, recurseTimes) {
function formatNumber(ctx, value) {
- // Format -0 as '-0'. Strict equality won't distinguish 0 from -0,
- // so instead we use the fact that 1 / -0 < 0 whereas 1 / 0 > 0 .
- if (value === 0 && 1 / value < 0)
+ // Format -0 as '-0'. Strict equality won't distinguish 0 from -0.
+ if (Object.is(value, -0))
return ctx.stylize('-0', 'number');
return ctx.stylize('' + value, 'number');
}
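Object.is is the one built-in comparison that distinguishes 0 from -0, which is exactly what formatNumber needs:

```js
// Why Object.is(value, -0) works where === does not.
console.log(-0 === 0);           // true  -- strict equality cannot tell them apart
console.log(Object.is(-0, 0));   // false
console.log(Object.is(-0, -0));  // true

const util = require('util');
console.log(util.inspect(-0));   // '-0'
console.log(util.inspect(0));    // '0'
```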
diff --git a/lib/vm.js b/lib/vm.js
index cf672fbbc14871..e2b6c60d1a83f6 100644
--- a/lib/vm.js
+++ b/lib/vm.js
@@ -34,17 +34,11 @@ Script.prototype.runInContext = function(contextifiedSandbox, options) {
};
Script.prototype.runInNewContext = function(sandbox, options) {
- var context = exports.createContext(sandbox);
+ var context = createContext(sandbox);
return this.runInContext(context, options);
};
-exports.Script = Script;
-
-exports.createScript = function(code, options) {
- return new Script(code, options);
-};
-
-exports.createContext = function(sandbox) {
+function createContext(sandbox) {
if (sandbox === undefined) {
sandbox = {};
} else if (binding.isContext(sandbox)) {
@@ -53,28 +47,11 @@ exports.createContext = function(sandbox) {
binding.makeContext(sandbox);
return sandbox;
-};
-
-exports.runInDebugContext = function(code) {
- return binding.runInDebugContext(code);
-};
-
-exports.runInContext = function(code, contextifiedSandbox, options) {
- var script = new Script(code, options);
- return script.runInContext(contextifiedSandbox, options);
-};
-
-exports.runInNewContext = function(code, sandbox, options) {
- var script = new Script(code, options);
- return script.runInNewContext(sandbox, options);
-};
-
-exports.runInThisContext = function(code, options) {
- var script = new Script(code, options);
- return script.runInThisContext(options);
-};
+}
-exports.isContext = binding.isContext;
+function createScript(code, options) {
+ return new Script(code, options);
+}
// Remove all SIGINT listeners and re-attach them after the wrapped function
// has executed, so that caught SIGINT are handled by the listeners again.
@@ -100,3 +77,31 @@ function sigintHandlersWrap(fn, thisArg, argsArray) {
}
}
}
+
+function runInDebugContext(code) {
+ return binding.runInDebugContext(code);
+}
+
+function runInContext(code, contextifiedSandbox, options) {
+ return createScript(code, options)
+ .runInContext(contextifiedSandbox, options);
+}
+
+function runInNewContext(code, sandbox, options) {
+ return createScript(code, options).runInNewContext(sandbox, options);
+}
+
+function runInThisContext(code, options) {
+ return createScript(code, options).runInThisContext(options);
+}
+
+module.exports = {
+ Script,
+ createContext,
+ createScript,
+ runInDebugContext,
+ runInContext,
+ runInNewContext,
+ runInThisContext,
+ isContext: binding.isContext
+};
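The vm.js rewrite is behaviour-preserving: the same functions are declared once and exported through a single module.exports object instead of being attached to exports piecemeal. Public usage is unchanged:

```js
// The refactored vm module keeps its existing public API.
const vm = require('vm');

const sandbox = { x: 2 };
vm.createContext(sandbox);              // contextify the object in place
vm.runInContext('y = x * 21', sandbox);
console.log(sandbox.y);                 // 42

const script = new vm.Script('total = a + b');
console.log(script.runInNewContext({ a: 1, b: 2 }));  // 3
console.log(vm.isContext(sandbox));     // true
```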
diff --git a/node.gyp b/node.gyp
index 69b26d340d6666..20167458d4c5ef 100644
--- a/node.gyp
+++ b/node.gyp
@@ -82,6 +82,7 @@
'lib/internal/cluster/shared_handle.js',
'lib/internal/cluster/utils.js',
'lib/internal/cluster/worker.js',
+ 'lib/internal/errors.js',
'lib/internal/freelist.js',
'lib/internal/fs.js',
'lib/internal/linkedlist.js',
@@ -92,6 +93,8 @@
'lib/internal/process/stdio.js',
'lib/internal/process/warning.js',
'lib/internal/process.js',
+ 'lib/internal/querystring.js',
+ 'lib/internal/process/write-coverage.js',
'lib/internal/readline.js',
'lib/internal/repl.js',
'lib/internal/socket_list.js',
@@ -149,6 +152,10 @@
],
'sources': [
+ 'src/tracing/agent.cc',
+ 'src/tracing/node_trace_buffer.cc',
+ 'src/tracing/node_trace_writer.cc',
+ 'src/tracing/trace_event.cc',
'src/async-wrap.cc',
'src/cares_wrap.cc',
'src/connection_wrap.cc',
@@ -227,6 +234,7 @@
'src/stream_base.h',
'src/stream_base-inl.h',
'src/stream_wrap.h',
+ 'src/tracing/trace_event.h',
'src/tree.h',
'src/util.h',
'src/util-inl.h',
@@ -382,7 +390,7 @@
['OS in "linux freebsd" and node_shared=="false"', {
'ldflags': [
'-Wl,--whole-archive,'
- '<(PRODUCT_DIR)/obj.target/deps/openssl/'
+ '<(OBJ_DIR)/deps/openssl/'
'<(OPENSSL_PRODUCT)',
'-Wl,--no-whole-archive',
],
diff --git a/src/env.h b/src/env.h
index 581d7e9aef3c67..b3bc79d4ce9d04 100644
--- a/src/env.h
+++ b/src/env.h
@@ -106,7 +106,6 @@ namespace node {
V(exponent_string, "exponent") \
V(exports_string, "exports") \
V(ext_key_usage_string, "ext_key_usage") \
- V(external_string, "external") \
V(external_stream_string, "_externalStream") \
V(family_string, "family") \
V(fatal_exception_string, "_fatalException") \
@@ -117,8 +116,6 @@ namespace node {
V(get_string, "get") \
V(gid_string, "gid") \
V(handle_string, "handle") \
- V(heap_total_string, "heapTotal") \
- V(heap_used_string, "heapUsed") \
V(homedir_string, "homedir") \
V(hostmaster_string, "hostmaster") \
V(ignore_string, "ignore") \
@@ -186,7 +183,6 @@ namespace node {
V(rename_string, "rename") \
V(replacement_string, "replacement") \
V(retry_string, "retry") \
- V(rss_string, "rss") \
V(serial_string, "serial") \
V(scopeid_string, "scopeid") \
V(sent_shutdown_string, "sentShutdown") \
diff --git a/src/node.cc b/src/node.cc
index 0ee0133df41c75..25cd87defcf9e1 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -38,6 +38,7 @@
#include "req-wrap.h"
#include "req-wrap-inl.h"
#include "string_bytes.h"
+#include "tracing/agent.h"
#include "util.h"
#include "uv.h"
#if NODE_USE_V8_PLATFORM
@@ -151,10 +152,12 @@ static node_module* modpending;
static node_module* modlist_builtin;
static node_module* modlist_linked;
static node_module* modlist_addon;
+static bool trace_enabled = false;
+static const char* trace_enabled_categories = nullptr;
#if defined(NODE_HAVE_I18N_SUPPORT)
// Path to ICU data (for i18n / Intl)
-static const char* icu_data_dir = nullptr;
+std::string icu_data_dir; // NOLINT(runtime/string)
#endif
// used by C++ modules as well
@@ -174,7 +177,7 @@ bool ssl_openssl_cert_store =
bool enable_fips_crypto = false;
bool force_fips_crypto = false;
# endif // NODE_FIPS_MODE
-const char* openssl_config = nullptr;
+std::string openssl_config; // NOLINT(runtime/string)
#endif // HAVE_OPENSSL
// true if process warnings should be suppressed
@@ -195,6 +198,7 @@ static uv_async_t dispatch_debug_messages_async;
static Mutex node_isolate_mutex;
static v8::Isolate* node_isolate;
+static tracing::Agent* tracing_agent;
static node::DebugOptions debug_options;
@@ -203,6 +207,7 @@ static struct {
void Initialize(int thread_pool_size) {
platform_ = v8::platform::CreateDefaultPlatform(thread_pool_size);
V8::InitializePlatform(platform_);
+ tracing::TraceEventHelper::SetCurrentPlatform(platform_);
}
void PumpMessageLoop(Isolate* isolate) {
@@ -901,12 +906,21 @@ Local<Value> UVException(Isolate* isolate,
// Look up environment variable unless running as setuid root.
-inline const char* secure_getenv(const char* key) {
+bool SafeGetenv(const char* key, std::string* text) {
#ifndef _WIN32
- if (getuid() != geteuid() || getgid() != getegid())
- return nullptr;
+ // TODO(bnoordhuis) Should perhaps also check whether getauxval(AT_SECURE)
+ // is non-zero on Linux.
+ if (getuid() != geteuid() || getgid() != getegid()) {
+ text->clear();
+ return false;
+ }
#endif
- return getenv(key);
+ if (const char* value = getenv(key)) {
+ *text = value;
+ return true;
+ }
+ text->clear();
+ return false;
}
@@ -2238,25 +2252,22 @@ void MemoryUsage(const FunctionCallbackInfo<Value>& args) {
return env->ThrowUVException(err, "uv_resident_set_memory");
}
+ Isolate* isolate = env->isolate();
// V8 memory usage
HeapStatistics v8_heap_stats;
- env->isolate()->GetHeapStatistics(&v8_heap_stats);
-
- Local<Number> heap_total =
- Number::New(env->isolate(), v8_heap_stats.total_heap_size());
- Local<Number> heap_used =
- Number::New(env->isolate(), v8_heap_stats.used_heap_size());
- Local<Number> external_mem =
- Number::New(env->isolate(),
- env->isolate()->AdjustAmountOfExternalAllocatedMemory(0));
+ isolate->GetHeapStatistics(&v8_heap_stats);
- Local<Object> info = Object::New(env->isolate());
- info->Set(env->rss_string(), Number::New(env->isolate(), rss));
- info->Set(env->heap_total_string(), heap_total);
- info->Set(env->heap_used_string(), heap_used);
- info->Set(env->external_string(), external_mem);
+ // Get the double array pointer from the Float64Array argument.
+ CHECK(args[0]->IsFloat64Array());
+ Local<Float64Array> array = args[0].As<Float64Array>();
+ CHECK_EQ(array->Length(), 4);
+ Local<ArrayBuffer> ab = array->Buffer();
+ double* fields = static_cast<double*>(ab->GetContents().Data());
- args.GetReturnValue().Set(info);
+ fields[0] = rss;
+ fields[1] = v8_heap_stats.total_heap_size();
+ fields[2] = v8_heap_stats.used_heap_size();
+ fields[3] = isolate->AdjustAmountOfExternalAllocatedMemory(0);
}
@@ -2368,8 +2379,6 @@ struct node_module* get_linked_module(const char* name) {
return mp;
}
-typedef void (UV_DYNAMIC* extInit)(Local<Object> exports);
-
// DLOpen is process.dlopen(module, filename).
// Used to load 'module.node' dynamically shared objects.
//
@@ -3060,17 +3069,6 @@ void SetupProcessObject(Environment* env,
"ares",
FIXED_ONE_BYTE_STRING(env->isolate(), ARES_VERSION_STR));
-#if defined(NODE_HAVE_I18N_SUPPORT) && defined(U_ICU_VERSION)
- // ICU-related versions are now handled on the js side, see bootstrap_node.js
-
- if (icu_data_dir != nullptr) {
- // Did the user attempt (via env var or parameter) to set an ICU path?
- READONLY_PROPERTY(process,
- "icu_data_dir",
- OneByteString(env->isolate(), icu_data_dir));
- }
-#endif
-
const char node_modules_version[] = NODE_STRINGIFY(NODE_MODULE_VERSION);
READONLY_PROPERTY(
versions,
@@ -3376,6 +3374,9 @@ void SetupProcessObject(Environment* env,
void SignalExit(int signo) {
uv_tty_reset_mode();
+ if (trace_enabled) {
+ tracing_agent->Stop();
+ }
#ifdef __FreeBSD__
// FreeBSD has a nasty bug, see RegisterSignalHandler for details
struct sigaction sa;
@@ -3521,8 +3522,9 @@ static void PrintHelp() {
" --enable-fips enable FIPS crypto at startup\n"
" --force-fips force FIPS crypto (cannot be disabled)\n"
#endif /* NODE_FIPS_MODE */
- " --openssl-config=path load OpenSSL configuration file from\n"
- " the specified path\n"
+ " --openssl-config=file load OpenSSL configuration from the\n"
+ " specified file (overrides\n"
+ " OPENSSL_CONF)\n"
#endif /* HAVE_OPENSSL */
#if defined(NODE_HAVE_I18N_SUPPORT)
" --icu-data-dir=dir set ICU data load path to dir\n"
@@ -3555,6 +3557,8 @@ static void PrintHelp() {
" prefixed to the module search path\n"
"NODE_REPL_HISTORY path to the persistent REPL history\n"
" file\n"
+ "OPENSSL_CONF load OpenSSL configuration from file\n"
+ "\n"
"Documentation can be found at https://nodejs.org/\n");
}
@@ -3659,6 +3663,16 @@ static void ParseArgs(int* argc,
trace_deprecation = true;
} else if (strcmp(arg, "--trace-sync-io") == 0) {
trace_sync_io = true;
+ } else if (strcmp(arg, "--trace-events-enabled") == 0) {
+ trace_enabled = true;
+ } else if (strcmp(arg, "--trace-event-categories") == 0) {
+ const char* categories = argv[index + 1];
+ if (categories == nullptr) {
+ fprintf(stderr, "%s: %s requires an argument\n", argv[0], arg);
+ exit(9);
+ }
+ args_consumed += 1;
+ trace_enabled_categories = categories;
} else if (strcmp(arg, "--track-heap-objects") == 0) {
track_heap_objects = true;
} else if (strcmp(arg, "--throw-deprecation") == 0) {
@@ -3692,11 +3706,11 @@ static void ParseArgs(int* argc,
force_fips_crypto = true;
#endif /* NODE_FIPS_MODE */
} else if (strncmp(arg, "--openssl-config=", 17) == 0) {
- openssl_config = arg + 17;
+ openssl_config.assign(arg + 17);
#endif /* HAVE_OPENSSL */
#if defined(NODE_HAVE_I18N_SUPPORT)
} else if (strncmp(arg, "--icu-data-dir=", 15) == 0) {
- icu_data_dir = arg + 15;
+ icu_data_dir.assign(arg + 15);
#endif
} else if (strcmp(arg, "--expose-internals") == 0 ||
strcmp(arg, "--expose_internals") == 0) {
@@ -4183,10 +4197,15 @@ void Init(int* argc,
#endif
// Allow for environment set preserving symlinks.
- if (auto preserve_symlinks = secure_getenv("NODE_PRESERVE_SYMLINKS")) {
- config_preserve_symlinks = (*preserve_symlinks == '1');
+ {
+ std::string text;
+ config_preserve_symlinks =
+ SafeGetenv("NODE_PRESERVE_SYMLINKS", &text) && text[0] == '1';
}
+ if (openssl_config.empty())
+ SafeGetenv("OPENSSL_CONF", &openssl_config);
+
// Parse a few arguments which are specific to Node.
int v8_argc;
const char** v8_argv;
@@ -4213,12 +4232,11 @@ void Init(int* argc,
#endif
#if defined(NODE_HAVE_I18N_SUPPORT)
- if (icu_data_dir == nullptr) {
- // if the parameter isn't given, use the env variable.
- icu_data_dir = secure_getenv("NODE_ICU_DATA");
- }
+ // If the parameter isn't given, use the env variable.
+ if (icu_data_dir.empty())
+ SafeGetenv("NODE_ICU_DATA", &icu_data_dir);
// Initialize ICU.
- // If icu_data_dir is nullptr here, it will load the 'minimal' data.
+ // If icu_data_dir is empty here, it will load the 'minimal' data.
if (!i18n::InitializeICUDirectory(icu_data_dir)) {
FatalError(nullptr, "Could not initialize ICU "
"(check NODE_ICU_DATA or --icu-data-dir parameters)");
@@ -4483,8 +4501,11 @@ int Start(int argc, char** argv) {
Init(&argc, const_cast<const char**>(argv), &exec_argc, &exec_argv);
#if HAVE_OPENSSL
- if (const char* extra = secure_getenv("NODE_EXTRA_CA_CERTS"))
- crypto::UseExtraCaCerts(extra);
+ {
+ std::string extra_ca_certs;
+ if (SafeGetenv("NODE_EXTRA_CA_CERTS", &extra_ca_certs))
+ crypto::UseExtraCaCerts(extra_ca_certs);
+ }
#ifdef NODE_FIPS_MODE
// In the case of FIPS builds we should make sure
// the random source is properly initialized first.
@@ -4493,13 +4514,23 @@ int Start(int argc, char** argv) {
// V8 on Windows doesn't have a good source of entropy. Seed it from
// OpenSSL's pool.
V8::SetEntropySource(crypto::EntropySource);
-#endif
+#endif // HAVE_OPENSSL
v8_platform.Initialize(v8_thread_pool_size);
+ // Enable tracing when argv has --trace-events-enabled.
+ if (trace_enabled) {
+ fprintf(stderr, "Warning: Trace event is an experimental feature "
+ "and could change at any time.\n");
+ tracing_agent = new tracing::Agent();
+ tracing_agent->Start(v8_platform.platform_, trace_enabled_categories);
+ }
V8::Initialize();
v8_initialized = true;
const int exit_code =
Start(uv_default_loop(), argc, argv, exec_argc, exec_argv);
+ if (trace_enabled) {
+ tracing_agent->Stop();
+ }
v8_initialized = false;
V8::Dispose();
diff --git a/src/node.h b/src/node.h
index 1255a4af7f11ce..24ebce87c8fdfc 100644
--- a/src/node.h
+++ b/src/node.h
@@ -41,6 +41,7 @@
#include "v8.h" // NOLINT(build/include_order)
#include "node_version.h" // NODE_MODULE_VERSION
+#include "tracing/trace_event.h"
#define NODE_MAKE_VERSION(major, minor, patch) \
((major) * 0x1000 + (minor) * 0x100 + (patch))
diff --git a/src/node_config.cc b/src/node_config.cc
index 401345f6a608be..98b219c3e81d1b 100644
--- a/src/node_config.cc
+++ b/src/node_config.cc
@@ -38,8 +38,11 @@ void InitConfig(Local<Object> target,
READONLY_BOOLEAN_PROPERTY("hasSmallICU");
#endif // NODE_HAVE_SMALL_ICU
- if (flag_icu_data_dir)
- READONLY_BOOLEAN_PROPERTY("usingICUDataDir");
+ target->DefineOwnProperty(env->context(),
+ OneByteString(env->isolate(), "icuDataDir"),
+ OneByteString(env->isolate(), icu_data_dir.data()))
+ .FromJust();
+
#endif // NODE_HAVE_I18N_SUPPORT
if (config_preserve_symlinks)
diff --git a/src/node_contextify.cc b/src/node_contextify.cc
index ff66ffdaaa0467..9b0ab4ea262d41 100644
--- a/src/node_contextify.cc
+++ b/src/node_contextify.cc
@@ -261,7 +261,8 @@ class ContextifyContext {
Environment* env = Environment::GetCurrent(args);
if (debug_context.IsEmpty()) {
// Force-load the debug context.
- Debug::GetMirror(args.GetIsolate()->GetCurrentContext(), args[0]);
+ auto dummy_event_listener = [] (const Debug::EventDetails&) {};
+ Debug::SetDebugEventListener(args.GetIsolate(), dummy_event_listener);
debug_context = Debug::GetDebugContext(args.GetIsolate());
CHECK(!debug_context.IsEmpty());
// Ensure that the debug context has an Environment assigned in case
diff --git a/src/node_crypto.cc b/src/node_crypto.cc
index 072a48f40c58dc..d9f056682013b5 100644
--- a/src/node_crypto.cc
+++ b/src/node_crypto.cc
@@ -1813,7 +1813,6 @@ template <class Base>
void SSLWrap<Base>::LoadSession(const FunctionCallbackInfo<Value>& args) {
Base* w;
ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder());
- Environment* env = w->ssl_env();
if (args.Length() >= 1 && Buffer::HasInstance(args[0])) {
ssize_t slen = Buffer::Length(args[0]);
@@ -5881,14 +5880,14 @@ void InitCryptoOnce() {
OPENSSL_no_config();
// --openssl-config=...
- if (openssl_config != nullptr) {
+ if (!openssl_config.empty()) {
OPENSSL_load_builtin_modules();
#ifndef OPENSSL_NO_ENGINE
ENGINE_load_builtin_engines();
#endif
ERR_clear_error();
CONF_modules_load_file(
- openssl_config,
+ openssl_config.c_str(),
nullptr,
CONF_MFLAGS_DEFAULT_SECTION);
int err = ERR_get_error();
diff --git a/src/node_file.cc b/src/node_file.cc
index 0abb88088786ae..99048b223478df 100644
--- a/src/node_file.cc
+++ b/src/node_file.cc
@@ -27,8 +27,10 @@
namespace node {
using v8::Array;
+using v8::ArrayBuffer;
using v8::Context;
using v8::EscapableHandleScope;
+using v8::Float64Array;
using v8::Function;
using v8::FunctionCallbackInfo;
using v8::FunctionTemplate;
@@ -528,6 +530,37 @@ Local<Object> BuildStatsObject(Environment* env, const uv_stat_t* s) {
return handle_scope.Escape(stats);
}
+void FillStatsArray(double* fields, const uv_stat_t* s) {
+ fields[0] = s->st_dev;
+ fields[1] = s->st_mode;
+ fields[2] = s->st_nlink;
+ fields[3] = s->st_uid;
+ fields[4] = s->st_gid;
+ fields[5] = s->st_rdev;
+#if defined(__POSIX__)
+ fields[6] = s->st_blksize;
+#else
+ fields[6] = -1;
+#endif
+ fields[7] = s->st_ino;
+ fields[8] = s->st_size;
+#if defined(__POSIX__)
+ fields[9] = s->st_blocks;
+#else
+ fields[9] = -1;
+#endif
+ // Dates.
+#define X(idx, name) \
+ fields[idx] = (static_cast<double>(s->st_##name.tv_sec) * 1000) + \
+ (static_cast<double>(s->st_##name.tv_nsec / 1000000)); \
+
+ X(10, atim)
+ X(11, mtim)
+ X(12, ctim)
+ X(13, birthtim)
+#undef X
+}
+
// Used to speed up module loading. Returns the contents of the file as
// a string or undefined when the file cannot be opened. The speedup
// comes from not creating Error objects on failure.
@@ -612,12 +645,15 @@ static void Stat(const FunctionCallbackInfo<Value>& args) {
BufferValue path(env->isolate(), args[0]);
ASSERT_PATH(path)
- if (args[1]->IsObject()) {
- ASYNC_CALL(stat, args[1], UTF8, *path)
- } else {
+ if (args[1]->IsFloat64Array()) {
+ Local<Float64Array> array = args[1].As<Float64Array>();
+ CHECK_EQ(array->Length(), 14);
+ Local<ArrayBuffer> ab = array->Buffer();
+ double* fields = static_cast<double*>(ab->GetContents().Data());
SYNC_CALL(stat, *path, *path)
- args.GetReturnValue().Set(
- BuildStatsObject(env, static_cast<const uv_stat_t*>(SYNC_REQ.ptr)));
+ FillStatsArray(fields, static_cast<const uv_stat_t*>(SYNC_REQ.ptr));
+ } else if (args[1]->IsObject()) {
+ ASYNC_CALL(stat, args[1], UTF8, *path)
}
}
@@ -630,12 +666,15 @@ static void LStat(const FunctionCallbackInfo<Value>& args) {
BufferValue path(env->isolate(), args[0]);
ASSERT_PATH(path)
- if (args[1]->IsObject()) {
- ASYNC_CALL(lstat, args[1], UTF8, *path)
- } else {
+ if (args[1]->IsFloat64Array()) {
+ Local<Float64Array> array = args[1].As<Float64Array>();
+ CHECK_EQ(array->Length(), 14);
+ Local<ArrayBuffer> ab = array->Buffer();
+ double* fields = static_cast<double*>(ab->GetContents().Data());
SYNC_CALL(lstat, *path, *path)
- args.GetReturnValue().Set(
- BuildStatsObject(env, static_cast<const uv_stat_t*>(SYNC_REQ.ptr)));
+ FillStatsArray(fields, static_cast<const uv_stat_t*>(SYNC_REQ.ptr));
+ } else if (args[1]->IsObject()) {
+ ASYNC_CALL(lstat, args[1], UTF8, *path)
}
}
@@ -649,12 +688,15 @@ static void FStat(const FunctionCallbackInfo<Value>& args) {
int fd = args[0]->Int32Value();
- if (args[1]->IsObject()) {
- ASYNC_CALL(fstat, args[1], UTF8, fd)
- } else {
+ if (args[1]->IsFloat64Array()) {
+ Local<Float64Array> array = args[1].As<Float64Array>();
+ CHECK_EQ(array->Length(), 14);
+ Local<ArrayBuffer> ab = array->Buffer();
+ double* fields = static_cast<double*>(ab->GetContents().Data());
SYNC_CALL(fstat, 0, fd)
- args.GetReturnValue().Set(
- BuildStatsObject(env, static_cast<const uv_stat_t*>(SYNC_REQ.ptr)));
+ FillStatsArray(fields, static_cast<const uv_stat_t*>(SYNC_REQ.ptr));
+ } else if (args[1]->IsObject()) {
+ ASYNC_CALL(fstat, args[1], UTF8, fd)
}
}
diff --git a/src/node_http_parser.cc b/src/node_http_parser.cc
index f757cd6797058d..bc9b5d953e8ebf 100644
--- a/src/node_http_parser.cc
+++ b/src/node_http_parser.cc
@@ -15,7 +15,7 @@
#include <stdlib.h> // free()
#include <string.h> // strdup()
-// This is a binding to http_parser (https://github.com/joyent/http-parser)
+// This is a binding to http_parser (https://github.com/nodejs/http-parser)
// The goal is to decouple sockets from parsing for more javascript-level
// agility. A Buffer is read from a socket and passed to parser.execute().
// The parser then issues callbacks with slices of the data
diff --git a/src/node_i18n.cc b/src/node_i18n.cc
index a98fdca4d1bffd..ae14aed7c6b4c2 100644
--- a/src/node_i18n.cc
+++ b/src/node_i18n.cc
@@ -70,41 +70,21 @@ using v8::Object;
using v8::String;
using v8::Value;
-bool flag_icu_data_dir = false;
-
namespace i18n {
-const size_t kStorageSize = 1024;
-
-// TODO(jasnell): This could potentially become a member of MaybeStackBuffer
-// at some point in the future. Care would need to be taken with the
-// MaybeStackBuffer<UChar> variant below.
-MaybeLocal<Object> AsBuffer(Isolate* isolate,
- MaybeStackBuffer<char>* buf,
- size_t len) {
- if (buf->IsAllocated()) {
- MaybeLocal<Object> ret = Buffer::New(isolate, buf->out(), len);
- if (!ret.IsEmpty()) buf->Release();
+template <typename T>
+MaybeLocal<Object> ToBufferEndian(Environment* env, MaybeStackBuffer<T>* buf) {
+ MaybeLocal<Object> ret = Buffer::New(env, buf);
+ if (ret.IsEmpty())
return ret;
- }
- return Buffer::Copy(isolate, buf->out(), len);
-}
-MaybeLocal<Object> AsBuffer(Isolate* isolate,
- MaybeStackBuffer<UChar>* buf,
- size_t len) {
- char* dst = reinterpret_cast<char*>(**buf);
- MaybeLocal<Object> ret;
- if (buf->IsAllocated()) {
- ret = Buffer::New(isolate, dst, len);
- if (!ret.IsEmpty()) buf->Release();
- } else {
- ret = Buffer::Copy(isolate, dst, len);
- }
- if (!ret.IsEmpty() && IsBigEndian()) {
- SPREAD_BUFFER_ARG(ret.ToLocalChecked(), buf);
- SwapBytes16(buf_data, buf_length);
+ static_assert(sizeof(T) == 1 || sizeof(T) == 2,
+ "Currently only one- or two-byte buffers are supported");
+ if (sizeof(T) > 1 && IsBigEndian()) {
+ SPREAD_BUFFER_ARG(ret.ToLocalChecked(), retbuf);
+ SwapBytes16(retbuf_data, retbuf_length);
}
+
return ret;
}
@@ -140,14 +120,14 @@ void CopySourceBuffer(MaybeStackBuffer<UChar>* dest,
}
}
-typedef MaybeLocal<Object> (*TranscodeFunc)(Isolate* isolate,
+typedef MaybeLocal<Object> (*TranscodeFunc)(Environment* env,
const char* fromEncoding,
const char* toEncoding,
const char* source,
const size_t source_length,
UErrorCode* status);
-MaybeLocal<Object> Transcode(Isolate* isolate,
+MaybeLocal<Object> Transcode(Environment* env,
const char* fromEncoding,
const char* toEncoding,
const char* source,
@@ -164,12 +144,14 @@ MaybeLocal<Object> Transcode(Isolate* isolate,
ucnv_convertEx(to.conv, from.conv, &target, target + limit,
&source, source + source_length, nullptr, nullptr,
nullptr, nullptr, true, true, status);
- if (U_SUCCESS(*status))
- ret = AsBuffer(isolate, &result, target - &result[0]);
+ if (U_SUCCESS(*status)) {
+ result.SetLength(target - &result[0]);
+ ret = ToBufferEndian(env, &result);
+ }
return ret;
}
-MaybeLocal<Object> TranscodeToUcs2(Isolate* isolate,
+MaybeLocal<Object> TranscodeToUcs2(Environment* env,
const char* fromEncoding,
const char* toEncoding,
const char* source,
@@ -183,11 +165,11 @@ MaybeLocal<Object> TranscodeToUcs2(Isolate* isolate,
ucnv_toUChars(from.conv, *destbuf, length_in_chars,
source, source_length, status);
if (U_SUCCESS(*status))
- ret = AsBuffer(isolate, &destbuf, length_in_chars);
+ ret = ToBufferEndian(env, &destbuf);
return ret;
}
-MaybeLocal<Object> TranscodeFromUcs2(Isolate* isolate,
+MaybeLocal<Object> TranscodeFromUcs2(Environment* env,
const char* fromEncoding,
const char* toEncoding,
const char* source,
@@ -202,37 +184,42 @@ MaybeLocal<Object> TranscodeFromUcs2(Isolate* isolate,
MaybeStackBuffer<char> destbuf(length_in_chars);
const uint32_t len = ucnv_fromUChars(to.conv, *destbuf, length_in_chars,
*sourcebuf, length_in_chars, status);
- if (U_SUCCESS(*status))
- ret = AsBuffer(isolate, &destbuf, len);
+ if (U_SUCCESS(*status)) {
+ destbuf.SetLength(len);
+ ret = ToBufferEndian(env, &destbuf);
+ }
return ret;
}
-MaybeLocal<Object> TranscodeUcs2FromUtf8(Isolate* isolate,
+MaybeLocal<Object> TranscodeUcs2FromUtf8(Environment* env,
const char* fromEncoding,
const char* toEncoding,
const char* source,
const size_t source_length,
UErrorCode* status) {
*status = U_ZERO_ERROR;
- MaybeStackBuffer<UChar, kStorageSize> destbuf;
+ MaybeStackBuffer<UChar> destbuf;
int32_t result_length;
- u_strFromUTF8(*destbuf, kStorageSize, &result_length,
+ u_strFromUTF8(*destbuf, destbuf.capacity(), &result_length,
source, source_length, status);
MaybeLocal<Object> ret;
if (U_SUCCESS(*status)) {
- ret = AsBuffer(isolate, &destbuf, result_length * sizeof(**destbuf));
+ destbuf.SetLength(result_length);
+ ret = ToBufferEndian(env, &destbuf);
} else if (*status == U_BUFFER_OVERFLOW_ERROR) {
*status = U_ZERO_ERROR;
destbuf.AllocateSufficientStorage(result_length);
u_strFromUTF8(*destbuf, result_length, &result_length,
source, source_length, status);
- if (U_SUCCESS(*status))
- ret = AsBuffer(isolate, &destbuf, result_length * sizeof(**destbuf));
+ if (U_SUCCESS(*status)) {
+ destbuf.SetLength(result_length);
+ ret = ToBufferEndian(env, &destbuf);
+ }
}
return ret;
}
-MaybeLocal<Object> TranscodeUtf8FromUcs2(Isolate* isolate,
+MaybeLocal<Object> TranscodeUtf8FromUcs2(Environment* env,
const char* fromEncoding,
const char* toEncoding,
const char* source,
@@ -243,20 +230,21 @@ MaybeLocal<Object> TranscodeUtf8FromUcs2(Isolate* isolate,
const size_t length_in_chars = source_length / sizeof(UChar);
int32_t result_length;
MaybeStackBuffer<UChar> sourcebuf;
- MaybeStackBuffer<char, kStorageSize> destbuf;
+ MaybeStackBuffer<char> destbuf;
CopySourceBuffer(&sourcebuf, source, source_length, length_in_chars);
- u_strToUTF8(*destbuf, kStorageSize, &result_length,
+ u_strToUTF8(*destbuf, destbuf.capacity(), &result_length,
*sourcebuf, length_in_chars, status);
if (U_SUCCESS(*status)) {
- ret = AsBuffer(isolate, &destbuf, result_length);
+ destbuf.SetLength(result_length);
+ ret = ToBufferEndian(env, &destbuf);
} else if (*status == U_BUFFER_OVERFLOW_ERROR) {
*status = U_ZERO_ERROR;
destbuf.AllocateSufficientStorage(result_length);
u_strToUTF8(*destbuf, result_length, &result_length, *sourcebuf,
length_in_chars, status);
if (U_SUCCESS(*status)) {
- ret = Buffer::New(isolate, *destbuf, result_length);
- destbuf.Release();
+ destbuf.SetLength(result_length);
+ ret = ToBufferEndian(env, &destbuf);
}
}
return ret;
@@ -322,7 +310,7 @@ void Transcode(const FunctionCallbackInfo<Value>& args) {
ABORT();
}
- result = tfn(isolate, EncodingName(fromEncoding), EncodingName(toEncoding),
+ result = tfn(env, EncodingName(fromEncoding), EncodingName(toEncoding),
ts_obj_data, ts_obj_length, &status);
} else {
status = U_ILLEGAL_ARGUMENT_ERROR;
@@ -404,12 +392,8 @@ static void GetVersion(const FunctionCallbackInfo<Value>& args) {
}
}
-bool InitializeICUDirectory(const char* icu_data_path) {
- if (icu_data_path != nullptr) {
- flag_icu_data_dir = true;
- u_setDataDirectory(icu_data_path);
- return true; // no error
- } else {
+bool InitializeICUDirectory(const std::string& path) {
+ if (path.empty()) {
UErrorCode status = U_ZERO_ERROR;
#ifdef NODE_HAVE_SMALL_ICU
// install the 'small' data.
@@ -418,6 +402,9 @@ bool InitializeICUDirectory(const char* icu_data_path) {
// no small data, so nothing to do.
#endif // !NODE_HAVE_SMALL_ICU
return (status == U_ZERO_ERROR);
+ } else {
+ u_setDataDirectory(path.c_str());
+ return true; // No error.
}
}
@@ -434,7 +421,7 @@ int32_t ToUnicode(MaybeStackBuffer<char>* buf,
int32_t len = uidna_nameToUnicodeUTF8(uidna,
input, length,
- **buf, buf->length(),
+ **buf, buf->capacity(),
&info,
&status);
@@ -443,13 +430,17 @@ int32_t ToUnicode(MaybeStackBuffer<char>* buf,
buf->AllocateSufficientStorage(len);
len = uidna_nameToUnicodeUTF8(uidna,
input, length,
- **buf, buf->length(),
+ **buf, buf->capacity(),
&info,
&status);
}
- if (U_FAILURE(status))
+ if (U_FAILURE(status)) {
len = -1;
+ buf->SetLength(0);
+ } else {
+ buf->SetLength(len);
+ }
uidna_close(uidna);
return len;
@@ -468,7 +459,7 @@ int32_t ToASCII(MaybeStackBuffer<char>* buf,
int32_t len = uidna_nameToASCII_UTF8(uidna,
input, length,
- **buf, buf->length(),
+ **buf, buf->capacity(),
&info,
&status);
@@ -477,13 +468,17 @@ int32_t ToASCII(MaybeStackBuffer<char>* buf,
buf->AllocateSufficientStorage(len);
len = uidna_nameToASCII_UTF8(uidna,
input, length,
- **buf, buf->length(),
+ **buf, buf->capacity(),
&info,
&status);
}
- if (U_FAILURE(status))
+ if (U_FAILURE(status)) {
len = -1;
+ buf->SetLength(0);
+ } else {
+ buf->SetLength(len);
+ }
uidna_close(uidna);
return len;
diff --git a/src/node_i18n.h b/src/node_i18n.h
index 21a579526ddc1a..21567eeb3ec38f 100644
--- a/src/node_i18n.h
+++ b/src/node_i18n.h
@@ -4,16 +4,17 @@
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include "node.h"
+#include <string>
#if defined(NODE_HAVE_I18N_SUPPORT)
namespace node {
-extern bool flag_icu_data_dir;
+extern std::string icu_data_dir; // NOLINT(runtime/string)
namespace i18n {
-bool InitializeICUDirectory(const char* icu_data_path);
+bool InitializeICUDirectory(const std::string& path);
int32_t ToASCII(MaybeStackBuffer<char>* buf,
const char* input,
diff --git a/src/node_internals.h b/src/node_internals.h
index 0a65be7642ff2a..b68594162b8ab8 100644
--- a/src/node_internals.h
+++ b/src/node_internals.h
@@ -12,6 +12,8 @@
#include <stdint.h>
#include <stdlib.h>
+#include <string>
+
struct sockaddr;
// Variation on NODE_DEFINE_CONSTANT that sets a String value.
@@ -34,7 +36,7 @@ namespace node {
// Set in node.cc by ParseArgs with the value of --openssl-config.
// Used in node_crypto.cc when initializing OpenSSL.
-extern const char* openssl_config;
+extern std::string openssl_config;
// Set in node.cc by ParseArgs when --preserve-symlinks is used.
// Used in node_config.cc to set a constant on process.binding('config')
@@ -106,6 +108,8 @@ void RegisterSignalHandler(int signal,
bool reset_handler = false);
#endif
+bool SafeGetenv(const char* key, std::string* text);
+
template <typename T, size_t N>
constexpr size_t arraysize(const T(&)[N]) { return N; }
@@ -194,6 +198,35 @@ v8::MaybeLocal<v8::Object> New(Environment* env,
// because ArrayBufferAllocator::Free() deallocates it again with free().
// Mixing operator new and free() is undefined behavior so don't do that.
v8::MaybeLocal<v8::Object> New(Environment* env, char* data, size_t length);
+
+// Construct a Buffer from a MaybeStackBuffer (and also its subclasses like
+// Utf8Value and TwoByteValue).
+// If |buf| is invalidated, an empty MaybeLocal is returned, and nothing is
+// changed.
+// If |buf| contains actual data, this method takes ownership of |buf|'s
+// underlying buffer. However, |buf| itself can be reused even after this call,
+// but its capacity, if increased through AllocateSufficientStorage, is not
+// guaranteed to stay the same.
+template <typename T>
+static v8::MaybeLocal<v8::Object> New(Environment* env,
+ MaybeStackBuffer<T>* buf) {
+ v8::MaybeLocal<v8::Object> ret;
+ char* src = reinterpret_cast<char*>(buf->out());
+ const size_t len_in_bytes = buf->length() * sizeof(buf->out()[0]);
+
+ if (buf->IsAllocated())
+ ret = New(env, src, len_in_bytes);
+ else if (!buf->IsInvalidated())
+ ret = Copy(env, src, len_in_bytes);
+
+ if (ret.IsEmpty())
+ return ret;
+
+ if (buf->IsAllocated())
+ buf->Release();
+
+ return ret;
+}
} // namespace Buffer
} // namespace node
diff --git a/src/node_os.cc b/src/node_os.cc
index 97b1a1d08da6ed..211ac3d01dd8b2 100644
--- a/src/node_os.cc
+++ b/src/node_os.cc
@@ -28,8 +28,10 @@ namespace node {
namespace os {
using v8::Array;
+using v8::ArrayBuffer;
using v8::Boolean;
using v8::Context;
+using v8::Float64Array;
using v8::FunctionCallbackInfo;
using v8::Integer;
using v8::Local;
@@ -182,14 +184,12 @@ static void GetUptime(const FunctionCallbackInfo<Value>& args) {
static void GetLoadAvg(const FunctionCallbackInfo<Value>& args) {
- Environment* env = Environment::GetCurrent(args);
- double loadavg[3];
+ CHECK(args[0]->IsFloat64Array());
+ Local<Float64Array> array = args[0].As<Float64Array>();
+ CHECK_EQ(array->Length(), 3);
+ Local<ArrayBuffer> ab = array->Buffer();
+ double* loadavg = static_cast<double*>(ab->GetContents().Data());
uv_loadavg(loadavg);
- Local<Array> loads = Array::New(env->isolate(), 3);
- loads->Set(0, Number::New(env->isolate(), loadavg[0]));
- loads->Set(1, Number::New(env->isolate(), loadavg[1]));
- loads->Set(2, Number::New(env->isolate(), loadavg[2]));
- args.GetReturnValue().Set(loads);
}
diff --git a/src/node_url.cc b/src/node_url.cc
index 0d5e695a3c13a5..5027399e89dd71 100644
--- a/src/node_url.cc
+++ b/src/node_url.cc
@@ -1074,8 +1074,10 @@ namespace url {
SET_HAVE_QUERY()
url.query = base.query;
}
+ break;
}
- break;
+ state = kPath;
+ continue;
case '\\':
case '/':
state = kFileSlash;
@@ -1092,8 +1094,8 @@ namespace url {
}
SET_HAVE_QUERY()
state = kQuery;
+ break;
}
- break;
case '#':
if (base_is_file) {
if (DOES_HAVE_HOST(base)) {
@@ -1109,8 +1111,8 @@ namespace url {
url.query = base.query;
}
state = kFragment;
+ break;
}
- break;
default:
if (base_is_file &&
(!WINDOWS_DRIVE_LETTER(ch, p[1]) ||
diff --git a/src/node_version.h b/src/node_version.h
index 7175e5824dd26d..48c5a3b2b807bf 100644
--- a/src/node_version.h
+++ b/src/node_version.h
@@ -2,10 +2,10 @@
#define SRC_NODE_VERSION_H_
#define NODE_MAJOR_VERSION 7
-#define NODE_MINOR_VERSION 6
-#define NODE_PATCH_VERSION 1
+#define NODE_MINOR_VERSION 7
+#define NODE_PATCH_VERSION 0
-#define NODE_VERSION_IS_RELEASE 0
+#define NODE_VERSION_IS_RELEASE 1
#ifndef NODE_STRINGIFY
#define NODE_STRINGIFY(n) NODE_STRINGIFY_HELPER(n)
diff --git a/src/spawn_sync.cc b/src/spawn_sync.cc
index 79f10a0ea2594d..3fcd61f22a4614 100644
--- a/src/spawn_sync.cc
+++ b/src/spawn_sync.cc
@@ -645,12 +645,17 @@ Local<Object> SyncProcessRunner::BuildResultObject() {
Integer::New(env()->isolate(), GetError()));
}
- if (exit_status_ >= 0)
- js_result->Set(env()->status_string(),
- Number::New(env()->isolate(), static_cast<double>(exit_status_)));
- else
+ if (exit_status_ >= 0) {
+ if (term_signal_ > 0) {
+ js_result->Set(env()->status_string(), Null(env()->isolate()));
+ } else {
+ js_result->Set(env()->status_string(),
+ Number::New(env()->isolate(), static_cast<double>(exit_status_)));
+ }
+ } else {
// If exit_status_ < 0 the process was never started because of some error.
js_result->Set(env()->status_string(), Null(env()->isolate()));
+ }
if (term_signal_ > 0)
js_result->Set(env()->signal_string(),
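The JS-visible effect of the spawn_sync.cc change: when the child is terminated by a signal, spawnSync() now reports status as null (matching the async spawn API) instead of an exit code. An illustrative, timing-based check:

```js
// After this change, a signal-killed child reports status === null.
const { spawnSync } = require('child_process');

const r = spawnSync(process.execPath,
                    ['-e', 'setInterval(() => {}, 1000)'],
                    { timeout: 500, killSignal: 'SIGTERM' });
console.log(r.signal);  // 'SIGTERM'
console.log(r.status);  // null
```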
diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc
new file mode 100644
index 00000000000000..97a3e11a2c458c
--- /dev/null
+++ b/src/tracing/agent.cc
@@ -0,0 +1,72 @@
+#include "tracing/agent.h"
+
+#include <sstream>
+#include <string>
+
+#include "env-inl.h"
+#include "libplatform/libplatform.h"
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceConfig;
+
+Agent::Agent() {}
+
+void Agent::Start(v8::Platform* platform, const char* enabled_categories) {
+ platform_ = platform;
+
+ int err = uv_loop_init(&tracing_loop_);
+ CHECK_EQ(err, 0);
+
+ NodeTraceWriter* trace_writer = new NodeTraceWriter(&tracing_loop_);
+ TraceBuffer* trace_buffer = new NodeTraceBuffer(
+ NodeTraceBuffer::kBufferChunks, trace_writer, &tracing_loop_);
+
+ tracing_controller_ = new TracingController();
+
+ TraceConfig* trace_config = new TraceConfig();
+ if (enabled_categories) {
+ std::stringstream category_list(enabled_categories);
+ while (category_list.good()) {
+ std::string category;
+ getline(category_list, category, ',');
+ trace_config->AddIncludedCategory(category.c_str());
+ }
+ } else {
+ trace_config->AddIncludedCategory("v8");
+ trace_config->AddIncludedCategory("node");
+ }
+
+ // This thread should be created *after* async handles are created
+ // (within NodeTraceWriter and NodeTraceBuffer constructors).
+ // Otherwise the thread could shut down prematurely.
+ err = uv_thread_create(&thread_, ThreadCb, this);
+ CHECK_EQ(err, 0);
+
+ tracing_controller_->Initialize(trace_buffer);
+ tracing_controller_->StartTracing(trace_config);
+ v8::platform::SetTracingController(platform, tracing_controller_);
+}
+
+void Agent::Stop() {
+ if (!IsStarted()) {
+ return;
+ }
+ // Perform final Flush on TraceBuffer. We don't want the tracing controller
+ // to flush the buffer again on destruction of the V8::Platform.
+ tracing_controller_->StopTracing();
+ delete tracing_controller_;
+ // Thread should finish when the tracing loop is stopped.
+ uv_thread_join(&thread_);
+ v8::platform::SetTracingController(platform_, nullptr);
+}
+
+// static
+void Agent::ThreadCb(void* arg) {
+ Agent* agent = static_cast<Agent*>(arg);
+ uv_run(&agent->tracing_loop_, UV_RUN_DEFAULT);
+}
+
+} // namespace tracing
+} // namespace node
diff --git a/src/tracing/agent.h b/src/tracing/agent.h
new file mode 100644
index 00000000000000..098f955192e0cf
--- /dev/null
+++ b/src/tracing/agent.h
@@ -0,0 +1,31 @@
+#ifndef SRC_TRACING_AGENT_H_
+#define SRC_TRACING_AGENT_H_
+
+#include "tracing/node_trace_buffer.h"
+#include "tracing/node_trace_writer.h"
+#include "uv.h"
+#include "v8.h"
+
+namespace node {
+namespace tracing {
+
+class Agent {
+ public:
+ explicit Agent();
+ void Start(v8::Platform* platform, const char* enabled_categories);
+ void Stop();
+
+ private:
+ bool IsStarted() { return platform_ != nullptr; }
+ static void ThreadCb(void* arg);
+
+ uv_thread_t thread_;
+ uv_loop_t tracing_loop_;
+ v8::Platform* platform_ = nullptr;
+ TracingController* tracing_controller_;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_TRACING_AGENT_H_
diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc
new file mode 100644
index 00000000000000..4773e08325a3ef
--- /dev/null
+++ b/src/tracing/node_trace_buffer.cc
@@ -0,0 +1,177 @@
+#include "tracing/node_trace_buffer.h"
+
+namespace node {
+namespace tracing {
+
+InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
+ NodeTraceWriter* trace_writer, NodeTraceBuffer* external_buffer)
+ : id_(id), flushing_(false), max_chunks_(max_chunks),
+ trace_writer_(trace_writer), external_buffer_(external_buffer) {
+ chunks_.resize(max_chunks);
+}
+
+TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
+ Mutex::ScopedLock scoped_lock(mutex_);
+ // Create new chunk if last chunk is full or there is no chunk.
+ if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
+ auto& chunk = chunks_[total_chunks_++];
+ if (chunk) {
+ chunk->Reset(current_chunk_seq_++);
+ } else {
+ chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
+ }
+ }
+ auto& chunk = chunks_[total_chunks_ - 1];
+ size_t event_index;
+ TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
+ *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
+ return trace_object;
+}
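+// Capacity note (derived from the code above): each InternalTraceBuffer holds
+// up to max_chunks_ chunks of TraceBufferChunk::kChunkSize events each, i.e.
+// Capacity() events in total, before IsFull() reports true and the owning
+// NodeTraceBuffer has to flush or drop events.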
+
+TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
+ Mutex::ScopedLock scoped_lock(mutex_);
+ if (handle == 0) {
+ // A handle value of zero never has a trace event associated with it.
+ return NULL;
+ }
+ size_t chunk_index, event_index;
+ uint32_t buffer_id, chunk_seq;
+ ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
+ if (buffer_id != id_ || chunk_index >= total_chunks_) {
+ // Either the chunk belongs to the other buffer, or is outside the current
+ // range of chunks loaded in memory (the latter being true suggests that
+ // the chunk has already been flushed and is no longer in memory.)
+ return NULL;
+ }
+ auto& chunk = chunks_[chunk_index];
+ if (chunk->seq() != chunk_seq) {
+ // Chunk is no longer in memory.
+ return NULL;
+ }
+ return chunk->GetEventAt(event_index);
+}
+
+void InternalTraceBuffer::Flush(bool blocking) {
+ {
+ Mutex::ScopedLock scoped_lock(mutex_);
+ if (total_chunks_ > 0) {
+ flushing_ = true;
+ for (size_t i = 0; i < total_chunks_; ++i) {
+ auto& chunk = chunks_[i];
+ for (size_t j = 0; j < chunk->size(); ++j) {
+ trace_writer_->AppendTraceEvent(chunk->GetEventAt(j));
+ }
+ }
+ total_chunks_ = 0;
+ flushing_ = false;
+ }
+ }
+ trace_writer_->Flush(blocking);
+}
+
+uint64_t InternalTraceBuffer::MakeHandle(
+ size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
+ return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
+ chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
+}
+
+void InternalTraceBuffer::ExtractHandle(
+ uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
+ uint32_t* chunk_seq, size_t* event_index) const {
+ *buffer_id = static_cast<uint32_t>(handle & 0x1);
+ handle >>= 1;
+ *chunk_seq = static_cast<uint32_t>(handle / Capacity());
+ size_t indices = handle % Capacity();
+ *chunk_index = indices / TraceBufferChunk::kChunkSize;
+ *event_index = indices % TraceBufferChunk::kChunkSize;
+}
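+// Worked example for the handle layout above (illustrative values): with
+// C = Capacity() and K = TraceBufferChunk::kChunkSize, MakeHandle() packs
+// chunk_seq * C + chunk_index * K + event_index into the upper bits and the
+// buffer id (0 or 1) into the lowest bit; ExtractHandle() reverses this with
+// the mask, division and modulo operations shown above.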
+
+NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
+ NodeTraceWriter* trace_writer, uv_loop_t* tracing_loop)
+ : tracing_loop_(tracing_loop), trace_writer_(trace_writer),
+ buffer1_(max_chunks, 0, trace_writer, this),
+ buffer2_(max_chunks, 1, trace_writer, this) {
+ current_buf_.store(&buffer1_);
+
+ flush_signal_.data = this;
+ int err = uv_async_init(tracing_loop_, &flush_signal_, NonBlockingFlushSignalCb);
+ CHECK_EQ(err, 0);
+
+ exit_signal_.data = this;
+ err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
+ CHECK_EQ(err, 0);
+}
+
+NodeTraceBuffer::~NodeTraceBuffer() {
+ uv_async_send(&exit_signal_);
+ Mutex::ScopedLock scoped_lock(exit_mutex_);
+ while (!exited_) {
+ exit_cond_.Wait(scoped_lock);
+ }
+}
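+// Shutdown illustration: the destructor above wakes the tracing thread via
+// exit_signal_ and then blocks on exit_cond_ until ExitSignalCb (running on
+// the tracing loop) has closed both async handles and set exited_.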
+
+TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
+ // If the buffer is full, attempt to perform a flush.
+ if (!TryLoadAvailableBuffer()) {
+ // Assign a value of zero as the trace event handle.
+ // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
+ // and will cause GetEventByHandle to return NULL if passed as an argument.
+ *handle = 0;
+ return nullptr;
+ }
+ return current_buf_.load()->AddTraceEvent(handle);
+}
+
+TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
+ return current_buf_.load()->GetEventByHandle(handle);
+}
+
+bool NodeTraceBuffer::Flush() {
+ buffer1_.Flush(true);
+ buffer2_.Flush(true);
+ return true;
+}
+
+// Attempts to set current_buf_ such that it references a buffer that can
+// write at least one trace event. If both buffers are unavailable this
+// method returns false; otherwise it returns true.
+bool NodeTraceBuffer::TryLoadAvailableBuffer() {
+ InternalTraceBuffer* prev_buf = current_buf_.load();
+ if (prev_buf->IsFull()) {
+ uv_async_send(&flush_signal_); // trigger flush on a separate thread
+ InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
+ &buffer2_ : &buffer1_;
+ if (!other_buf->IsFull()) {
+ current_buf_.store(other_buf);
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
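+// Example flow for TryLoadAvailableBuffer() above: if buffer1_ is current and
+// fills up, a non-blocking flush is signalled and current_buf_ flips to
+// buffer2_; if buffer2_ is also still full (its flush has not finished yet),
+// AddTraceEvent() returns nullptr and the event is dropped.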
+
+// static
+void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
+ NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
+ if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
+ buffer->buffer1_.Flush(false);
+ }
+ if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
+ buffer->buffer2_.Flush(false);
+ }
+}
+
+// static
+void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
+ NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
+ uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_), nullptr);
+ uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_), [](uv_handle_t* signal) {
+ NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
+ Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
+ buffer->exited_ = true;
+ buffer->exit_cond_.Signal(scoped_lock);
+ });
+}
+
+} // namespace tracing
+} // namespace node
diff --git a/src/tracing/node_trace_buffer.h b/src/tracing/node_trace_buffer.h
new file mode 100644
index 00000000000000..619799fdb21978
--- /dev/null
+++ b/src/tracing/node_trace_buffer.h
@@ -0,0 +1,89 @@
+#ifndef SRC_NODE_TRACE_BUFFER_H_
+#define SRC_NODE_TRACE_BUFFER_H_
+
+#include "node_mutex.h"
+#include "tracing/node_trace_writer.h"
+#include "libplatform/v8-tracing.h"
+
+#include <atomic>
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceBuffer;
+using v8::platform::tracing::TraceBufferChunk;
+using v8::platform::tracing::TraceObject;
+
+// forward declaration
+class NodeTraceBuffer;
+
+class InternalTraceBuffer {
+ public:
+ InternalTraceBuffer(size_t max_chunks, uint32_t id,
+ NodeTraceWriter* trace_writer,
+ NodeTraceBuffer* external_buffer);
+
+ TraceObject* AddTraceEvent(uint64_t* handle);
+ TraceObject* GetEventByHandle(uint64_t handle);
+ void Flush(bool blocking);
+ bool IsFull() const {
+ return total_chunks_ == max_chunks_ && chunks_[total_chunks_ - 1]->IsFull();
+ }
+ bool IsFlushing() const {
+ return flushing_;
+ }
+
+ private:
+ uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq,
+ size_t event_index) const;
+ void ExtractHandle(uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
+ uint32_t* chunk_seq, size_t* event_index) const;
+ size_t Capacity() const { return max_chunks_ * TraceBufferChunk::kChunkSize; }
+
+ Mutex mutex_;
+ bool flushing_;
+ size_t max_chunks_;
+ NodeTraceWriter* trace_writer_;
+ NodeTraceBuffer* external_buffer_;
+ std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+ size_t total_chunks_ = 0;
+ uint32_t current_chunk_seq_ = 1;
+ uint32_t id_;
+};
+
+class NodeTraceBuffer : public TraceBuffer {
+ public:
+ NodeTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer,
+ uv_loop_t* tracing_loop);
+ ~NodeTraceBuffer();
+
+ TraceObject* AddTraceEvent(uint64_t* handle) override;
+ TraceObject* GetEventByHandle(uint64_t handle) override;
+ bool Flush() override;
+
+ static const size_t kBufferChunks = 1024;
+
+ private:
+ bool TryLoadAvailableBuffer();
+ static void NonBlockingFlushSignalCb(uv_async_t* signal);
+ static void ExitSignalCb(uv_async_t* signal);
+
+ uv_loop_t* tracing_loop_;
+ uv_async_t flush_signal_;
+ uv_async_t exit_signal_;
+ bool exited_ = false;
+ // Used exclusively for exit logic.
+ Mutex exit_mutex_;
+ // Used to wait until async handles have been closed.
+ ConditionVariable exit_cond_;
+ std::unique_ptr<NodeTraceWriter> trace_writer_;
+ // TODO: Change std::atomic to something less contentious.
+ std::atomic<InternalTraceBuffer*> current_buf_;
+ InternalTraceBuffer buffer1_;
+ InternalTraceBuffer buffer2_;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_NODE_TRACE_BUFFER_H_
diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
new file mode 100644
index 00000000000000..41753c5e65e557
--- /dev/null
+++ b/src/tracing/node_trace_writer.cc
@@ -0,0 +1,185 @@
+#include "tracing/node_trace_writer.h"
+
+#include <string.h>
+#include <fcntl.h>
+
+#include "util.h"
+
+namespace node {
+namespace tracing {
+
+NodeTraceWriter::NodeTraceWriter(uv_loop_t* tracing_loop)
+ : tracing_loop_(tracing_loop) {
+ flush_signal_.data = this;
+ int err = uv_async_init(tracing_loop_, &flush_signal_, FlushSignalCb);
+ CHECK_EQ(err, 0);
+
+ exit_signal_.data = this;
+ err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
+ CHECK_EQ(err, 0);
+}
+
+void NodeTraceWriter::WriteSuffix() {
+ // If our final log file has traces, then end the file appropriately.
+ // This means that if no trace events are recorded, then no trace file is
+ // produced.
+ bool should_flush = false;
+ {
+ Mutex::ScopedLock scoped_lock(stream_mutex_);
+ if (total_traces_ > 0) {
+ total_traces_ = 0; // so we don't write it again in FlushPrivate
+ // Appends "]}" to stream_.
+ delete json_trace_writer_;
+ should_flush = true;
+ }
+ }
+ if (should_flush) {
+ Flush(true);
+ }
+}
+
+NodeTraceWriter::~NodeTraceWriter() {
+ WriteSuffix();
+ uv_fs_t req;
+ int err;
+ if (fd_ != -1) {
+ err = uv_fs_close(tracing_loop_, &req, fd_, nullptr);
+ CHECK_EQ(err, 0);
+ uv_fs_req_cleanup(&req);
+ }
+ uv_async_send(&exit_signal_);
+ Mutex::ScopedLock scoped_lock(request_mutex_);
+ while (!exited_) {
+ exit_cond_.Wait(scoped_lock);
+ }
+}
+
+void NodeTraceWriter::OpenNewFileForStreaming() {
+ ++file_num_;
+ uv_fs_t req;
+ std::ostringstream log_file;
+ log_file << "node_trace." << file_num_ << ".log";
+ fd_ = uv_fs_open(tracing_loop_, &req, log_file.str().c_str(),
+ O_CREAT | O_WRONLY | O_TRUNC, 0644, NULL);
+ CHECK_NE(fd_, -1);
+ uv_fs_req_cleanup(&req);
+}
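+// The files produced above are named node_trace.1.log, node_trace.2.log, ...
+// and are created relative to the process's current working directory.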
+
+void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
+ Mutex::ScopedLock scoped_lock(stream_mutex_);
+ // If this is the first trace event, open a new file for streaming.
+ if (total_traces_ == 0) {
+ OpenNewFileForStreaming();
+ // Constructing a new JSONTraceWriter object appends "{\"traceEvents\":["
+ // to stream_.
+ // In other words, the constructor initializes the serialization stream
+ // to a state where we can start writing trace events to it.
+ // Repeatedly constructing and destroying json_trace_writer_ allows
+ // us to use V8's JSON writer instead of implementing our own.
+ json_trace_writer_ = TraceWriter::CreateJSONTraceWriter(stream_);
+ }
+ ++total_traces_;
+ json_trace_writer_->AppendTraceEvent(trace_event);
+}
+
+void NodeTraceWriter::FlushPrivate() {
+ std::string str;
+ int highest_request_id;
+ {
+ Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
+ if (total_traces_ >= kTracesPerFile) {
+ total_traces_ = 0;
+ // Destroying the member JSONTraceWriter object appends "]}" to
+ // stream_ - in other words, ending a JSON file.
+ delete json_trace_writer_;
+ }
+ // str() makes a copy of the contents of the stream.
+ str = stream_.str();
+ stream_.str("");
+ stream_.clear();
+ }
+ {
+ Mutex::ScopedLock request_scoped_lock(request_mutex_);
+ highest_request_id = num_write_requests_;
+ }
+ WriteToFile(std::move(str), highest_request_id);
+}
+
+void NodeTraceWriter::FlushSignalCb(uv_async_t* signal) {
+ NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+ trace_writer->FlushPrivate();
+}
+
+// TODO: Remove (is it necessary to change the API? Because of WriteSuffix,
+// it no longer matters whether it's true or false.)
+void NodeTraceWriter::Flush() {
+ Flush(true);
+}
+
+void NodeTraceWriter::Flush(bool blocking) {
+ Mutex::ScopedLock scoped_lock(request_mutex_);
+ if (!json_trace_writer_) {
+ return;
+ }
+ int request_id = ++num_write_requests_;
+ int err = uv_async_send(&flush_signal_);
+ CHECK_EQ(err, 0);
+ if (blocking) {
+ // Wait until data associated with this request id has been written to disk.
+ // This guarantees that data from all earlier requests have also been
+ // written.
+ while (request_id > highest_request_id_completed_) {
+ request_cond_.Wait(scoped_lock);
+ }
+ }
+}
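+// Illustration of the two Flush() paths above: Flush(false), as used by the
+// non-blocking flush callback in NodeTraceBuffer, only queues an async flush
+// request; Flush(true) additionally waits on request_cond_ until the write
+// carrying this request id has completed on the tracing thread.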
+
+void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
+ WriteRequest* write_req = new WriteRequest();
+ write_req->str = std::move(str);
+ write_req->writer = this;
+ write_req->highest_request_id = highest_request_id;
+ uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(write_req->str.c_str()),
+ write_req->str.length());
+ request_mutex_.Lock();
+ // Manage a queue of WriteRequest objects because the behavior of uv_fs_write
+ // is undefined if the same WriteRequest object is used more than once
+ // between WriteCb calls. In addition, this allows us to keep track of the id
+ // of the latest write request that has actually been completed.
+ write_req_queue_.push(write_req);
+ request_mutex_.Unlock();
+ int err = uv_fs_write(tracing_loop_, reinterpret_cast<uv_fs_t*>(write_req),
+ fd_, &uv_buf, 1, -1, WriteCb);
+ CHECK_EQ(err, 0);
+}
+
+void NodeTraceWriter::WriteCb(uv_fs_t* req) {
+ WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
+ CHECK_GE(write_req->req.result, 0);
+
+ NodeTraceWriter* writer = write_req->writer;
+ int highest_request_id = write_req->highest_request_id;
+ {
+ Mutex::ScopedLock scoped_lock(writer->request_mutex_);
+ CHECK_EQ(write_req, writer->write_req_queue_.front());
+ writer->write_req_queue_.pop();
+ writer->highest_request_id_completed_ = highest_request_id;
+ writer->request_cond_.Broadcast(scoped_lock);
+ }
+ delete write_req;
+}
+
+// static
+void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) {
+ NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+ uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->flush_signal_), nullptr);
+ uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->exit_signal_), [](uv_handle_t* signal) {
+ NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+ Mutex::ScopedLock scoped_lock(trace_writer->request_mutex_);
+ trace_writer->exited_ = true;
+ trace_writer->exit_cond_.Signal(scoped_lock);
+ });
+}
+
+} // namespace tracing
+} // namespace node
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
new file mode 100644
index 00000000000000..5813db0ab1fd33
--- /dev/null
+++ b/src/tracing/node_trace_writer.h
@@ -0,0 +1,74 @@
+#ifndef SRC_NODE_TRACE_WRITER_H_
+#define SRC_NODE_TRACE_WRITER_H_
+
+#include <sstream>
+#include <queue>
+
+#include "node_mutex.h"
+#include "libplatform/v8-tracing.h"
+#include "uv.h"
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceObject;
+using v8::platform::tracing::TraceWriter;
+using v8::platform::tracing::TracingController;
+
+class NodeTraceWriter : public TraceWriter {
+ public:
+ NodeTraceWriter(uv_loop_t* tracing_loop);
+ ~NodeTraceWriter();
+
+ void AppendTraceEvent(TraceObject* trace_event) override;
+ void Flush() override;
+ void Flush(bool blocking);
+
+ static const int kTracesPerFile = 1 << 19;
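+ // i.e. roughly 524288 (1 << 19) trace events are written per log file
+ // before it is finalized and a new node_trace.<N>.log file is started.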
+
+ private:
+ struct WriteRequest {
+ uv_fs_t req;
+ NodeTraceWriter* writer;
+ std::string str;
+ int highest_request_id;
+ };
+
+ static void WriteCb(uv_fs_t* req);
+ void OpenNewFileForStreaming();
+ void WriteToFile(std::string&& str, int highest_request_id);
+ void WriteSuffix();
+ static void FlushSignalCb(uv_async_t* signal);
+ void FlushPrivate();
+ static void ExitSignalCb(uv_async_t* signal);
+
+ uv_loop_t* tracing_loop_;
+ // Triggers callback to initiate writing the contents of stream_ to disk.
+ uv_async_t flush_signal_;
+ // Triggers callback to close async objects, ending the tracing thread.
+ uv_async_t exit_signal_;
+ // Prevents concurrent R/W on state related to serialized trace data
+ // before it's written to disk, namely stream_ and total_traces_.
+ Mutex stream_mutex_;
+ // Prevents concurrent R/W on state related to write requests.
+ Mutex request_mutex_;
+ // Allows blocking calls to Flush() to wait on a condition for
+ // trace events to be written to disk.
+ ConditionVariable request_cond_;
+ // Used to wait until async handles have been closed.
+ ConditionVariable exit_cond_;
+ int fd_ = -1;
+ std::queue<WriteRequest*> write_req_queue_;
+ int num_write_requests_ = 0;
+ int highest_request_id_completed_ = 0;
+ int total_traces_ = 0;
+ int file_num_ = 0;
+ std::ostringstream stream_;
+ TraceWriter* json_trace_writer_ = nullptr;
+ bool exited_ = false;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_NODE_TRACE_WRITER_H_
diff --git a/src/tracing/trace_event.cc b/src/tracing/trace_event.cc
new file mode 100644
index 00000000000000..b83cae6e05c10c
--- /dev/null
+++ b/src/tracing/trace_event.cc
@@ -0,0 +1,17 @@
+#include "tracing/trace_event.h"
+
+namespace node {
+namespace tracing {
+
+v8::Platform* platform_ = nullptr;
+
+void TraceEventHelper::SetCurrentPlatform(v8::Platform* platform) {
+ platform_ = platform;
+}
+
+v8::Platform* TraceEventHelper::GetCurrentPlatform() {
+ return platform_;
+}
+
+} // namespace tracing
+} // namespace node
diff --git a/src/tracing/trace_event.h b/src/tracing/trace_event.h
new file mode 100644
index 00000000000000..a035fd51500bb9
--- /dev/null
+++ b/src/tracing/trace_event.h
@@ -0,0 +1,1710 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_TRACING_TRACE_EVENT_H_
+#define SRC_TRACING_TRACE_EVENT_H_
+
+#include <stddef.h>
+
+#include "v8-platform.h"
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_macros_common.h instead of here.
+
+// From v8/base/trace_event/common/trace_event_common.h
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
+// land your change in base/ first, and then copy-and-paste it.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+// TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+// void OnMyMessage(send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+// }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+// char* str = strdup("importantName");
+// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method the trace
+// framework will call back to your object to convert a trace output time. This
+// means, if the category for the event is disabled, the conversion will not
+// happen.
+//
+// class MyData : public base::trace_event::ConvertableToTraceFormat {
+// public:
+// MyData() {}
+// void AppendAsTraceFormat(std::string* out) const override {
+// out->append("{\"foo\":1}");
+// }
+// private:
+// ~MyData() override {}
+// DISALLOW_COPY_AND_ASSIGN(MyData);
+// };
+//
+// TRACE_EVENT1("foo", "bar", "data",
+// std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership of the passed pointer and it will
+// be free'd when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char, and
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
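+// For example (hypothetical category name):
+// TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mysubsystem"), "expensiveWork")
+// expands the category group to "disabled-by-default-mysubsystem", which
+// stays off unless that category is explicitly enabled.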
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE | scope)
+
+// Syntactic sugars for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_COPY)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp, \
+ value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+ value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
+ category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
+ thread_id, timestamp, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// ASYNC_STEP_* APIs should be only used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must use
+// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
+// operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, they will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each step can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1( \
+ category_group, name, id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+ step, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "step", step)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id, \
+ timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+// considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+// be logged using the same id and category_group.
+//
+// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
+// at the first NESTABLE_ASYNC event of that id, and unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
+// or 2 associated arguments. If the category is not enabled, then this does
+// nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with one associated argument. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
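+//
+// Usage sketch (illustrative only; the names and |task_id| are examples): the
+// code that posts a task records
+//   TRACE_EVENT_FLOW_BEGIN0("task", "MyTask", task_id);
+// and the code that later runs it, possibly on another thread, records
+//   TRACE_EVENT_FLOW_END0("task", "MyTask", task_id);
+// using the same |name| and |id|.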
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, \
+ TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
+// Records a clock sync event.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \
+ INTERNAL_TRACE_EVENT_ADD( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ TRACE_EVENT_FLAG_NONE, "sync_id", sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
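+
+// Usage sketch (illustrative only; the category and name are examples and
+// |obj| stands for any client object pointer):
+//   TRACE_EVENT_OBJECT_CREATED_WITH_ID("category", "MyObject", obj);
+//   TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("category", "MyObject", obj, "state");
+//   TRACE_EVENT_OBJECT_DELETED_WITH_ID("category", "MyObject", obj);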
+
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
+ TRACE_ID_DONT_MANGLE(context))
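+
+// Usage sketch (illustrative only; |ctx_id| stands for a previously
+// snapshotted context object id):
+//   TRACE_EVENT_SCOPED_CONTEXT("category", "MyContextType", ctx_id);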
+
+// Macro to specify that two trace IDs are identical. For example,
+// TRACE_BIND_IDS(
+// "category", "name",
+// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
+// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
+// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
+// the current process have the same ID as events with ID
+// ("blink::ResourceFetcher::FetchRequest", 0x2000).
+#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
+ INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
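+
+// Usage sketch (illustrative only):
+//   bool enabled;
+//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("category", &enabled);
+//   if (enabled) { /* build expensive trace arguments */ }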
+
+// Macro to explicitly warm up a given category group. This could be useful in
+// cases where we want to initialize a category group before any trace events
+// for that category group are reported. For example, to have a category group
+// always show up in the "record categories" list for manually selecting
+// settings in about://tracing.
+#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+ do { \
+ static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+ int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+ if (num_traces_recorded != -1 && \
+ num_traces_recorded != \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
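+
+// Usage sketch (illustrative only):
+//   bool is_new_trace;
+//   TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+//   if (is_new_trace) { /* re-emit per-trace metadata events */ }
+// Note that TRACE_EVENT_API_GET_NUM_TRACES_RECORDED is mapped to
+// UNIMPLEMENTED() below in this port.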
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_BIND_IDS ('=')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
+
+// The pointer returned from GetCategoryGroupEnabled() points to a
+// value with zero or more of the following bits. Used in this class only.
+// The TRACE_EVENT macros should only use the value as a bool.
+// These values must be in sync with macro values in trace_log.h in
+// chromium.
+enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ // Category group enabled to export events to ETW.
+ kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
+};
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) node::tracing::TraceStringWithCopy(str)
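+// For example (illustrative only; |str| stands for a std::string and
+// TRACE_EVENT1 is defined earlier in this header):
+//   TRACE_EVENT1("category", "name", "arg", TRACE_STR_COPY(str.c_str()));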
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) node::tracing::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) node::tracing::TraceID::DontMangle(id)
+
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+ node::tracing::TraceID::WithScope(scope, id)
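+// For example (illustrative only):
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+//       "category", "name", TRACE_ID_WITH_SCOPE("scope string", 0x1000));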
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ node::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \
+ category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ node::tracing::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ node::tracing::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (kEnabledForRecording_CategoryGroupEnabledFlags | \
+ kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// The following macro has no implementation, but it needs to exist since
+// it gets called from scoped trace events. It cannot call UNIMPLEMENTED()
+// since an empty implementation is a valid one.
+#define INTERNAL_TRACE_MEMORY(category, name)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ node::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->GetCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED UNIMPLEMENTED()
+
+// Add a trace event to the platform tracing system.
+// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// const char* scope,
+// uint64_t id,
+// uint64_t bind_id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ node::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ node::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->UpdateTraceEventDuration
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) (var)
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) (var) = (value)
+
+// The thread buckets for the sampling profiler.
+extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
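+// For example, INTERNAL_TRACE_EVENT_UID(atomic) used on line 100 expands to
+// the identifier trace_event_unique_atomic100.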
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+// TODO(fmeawad): This implementation contradicts that we can have a different
+// configuration for each isolate,
+// https://code.google.com/p/v8/issues/detail?id=4563
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE( \
+ atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ node::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ node::tracing::kGlobalScope, node::tracing::kNoId, \
+ node::tracing::kNoId, flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ node::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ uint64_t h = node::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ node::tracing::kGlobalScope, node::tracing::kNoId, \
+ node::tracing::kNoId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ node::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flow_flags; \
+ node::tracing::TraceID trace_event_bind_id(bind_id, \
+ &trace_event_flags); \
+ uint64_t h = node::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ node::tracing::kGlobalScope, node::tracing::kNoId, \
+ trace_event_bind_id.raw_id(), trace_event_flags, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ node::tracing::TraceID trace_event_trace_id(id, \
+ &trace_event_flags); \
+ node::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ node::tracing::kNoId, trace_event_flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Adds a trace event with a given timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
+ timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id and timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ phase, category_group, name, id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id, thread_id, and timestamp. Not
+// Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Enter and leave a context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ struct INTERNAL_TRACE_EVENT_UID(ScopedContext) { \
+ public: \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) { \
+ TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_); \
+ } \
+ ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() { \
+ TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_); \
+ } \
+ \
+ private: \
+ /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
+ void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
+ uint64_t cid_; \
+ }; \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
+namespace node {
+namespace tracing {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const decltype(nullptr) kGlobalScope = nullptr;
+const uint64_t kNoId = 0;
+
+class TraceEventHelper {
+ public:
+ static void SetCurrentPlatform(v8::Platform* platform);
+ static v8::Platform* GetCurrentPlatform();
+};
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+ class WithScope {
+ public:
+ WithScope(const char* scope, uint64_t raw_id)
+ : scope_(scope), raw_id_(raw_id) {}
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+ };
+
+ class DontMangle {
+ public:
+ explicit DontMangle(const void* raw_id)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {}
+ explicit DontMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(signed char raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(WithScope scoped_id)
+ : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+ const char* scope() const { return scope_; }
+ uint64_t raw_id() const { return raw_id_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+ };
+
+ class ForceMangle {
+ public:
+ explicit ForceMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(signed char raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ uint64_t raw_id() const { return raw_id_; }
+
+ private:
+ uint64_t raw_id_;
+ };
+
+ TraceID(const void* raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(DontMangle maybe_scoped_id, unsigned int* flags)
+ : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+ TraceID(uint64_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(uint16_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(int64_t raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(int raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(int16_t raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(signed char raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(WithScope scoped_id, unsigned int* flags)
+ : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ int64_t as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char*() const { return str_; }
+
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \
+ value_type_id) \
+ static inline void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
+ static inline void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// These AddTraceEvent template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str and pass through to the tracing API,
+// the arg_values must live throughout these procedures.
+
+static inline uint64_t AddTraceEvent(char phase,
+ const uint8_t* category_group_enabled,
+ const char* name, const char* scope,
+ uint64_t id, uint64_t bind_id,
+ unsigned int flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ scope, id, bind_id, kZeroNumArgs, NULL,
+ NULL, NULL, flags);
+}
+
+template <class ARG1_TYPE>
+static inline uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+ const char* arg1_name, const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
+ &arg1_name, arg_types, arg_values, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static inline uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+ const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = {arg1_name, arg2_name};
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(NULL) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled, const char* name,
+ uint64_t event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+ // members of this class instead, compiler warnings occur about potential
+ // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ uint64_t event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class ScopedTraceBinaryEfficient {
+ public:
+ ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+ ~ScopedTraceBinaryEfficient();
+
+ private:
+ const uint8_t* category_group_enabled_;
+ const char* name_;
+ uint64_t event_handle_;
+};
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the sampling state it had recorded.
+template <size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+ explicit TraceEventSamplingStateScope(const char* category_and_name) {
+ previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+ TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+ }
+
+ ~TraceEventSamplingStateScope() {
+ TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+ }
+
+ static inline const char* Current() {
+ return reinterpret_cast<const char*>(
+ TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
+ }
+
+ static inline void Set(const char* category_and_name) {
+ TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+ const_cast(category_and_name)));
+ }
+
+ private:
+ const char* previous_state_;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_TRACING_TRACE_EVENT_H_
diff --git a/src/util.h b/src/util.h
index b32f2b8a71ca0f..cf530a4da9146c 100644
--- a/src/util.h
+++ b/src/util.h
@@ -10,6 +10,7 @@
#include
#include
#include
+#include
#include <type_traits>  // std::remove_reference
@@ -304,29 +305,40 @@ class MaybeStackBuffer {
return length_;
}
- // Call to make sure enough space for `storage` entries is available.
- // There can only be 1 call to AllocateSufficientStorage or Invalidate
- // per instance.
+ // Current maximum capacity of the buffer with which SetLength() can be used
+ // without first calling AllocateSufficientStorage().
+ size_t capacity() const {
+ return IsAllocated() ? capacity_ :
+ IsInvalidated() ? 0 : kStackStorageSize;
+ }
+
+ // Make sure enough space for `storage` entries is available.
+ // This method can be called multiple times throughout the lifetime of the
+ // buffer, but once this has been called Invalidate() cannot be used.
+ // Content of the buffer in the range [0, length()) is preserved.
void AllocateSufficientStorage(size_t storage) {
- if (storage <= kStackStorageSize) {
- buf_ = buf_st_;
- } else {
- buf_ = Malloc(storage);
+ CHECK(!IsInvalidated());
+ if (storage > capacity()) {
+ bool was_allocated = IsAllocated();
+ T* allocated_ptr = was_allocated ? buf_ : nullptr;
+ buf_ = Realloc(allocated_ptr, storage);
+ capacity_ = storage;
+ if (!was_allocated && length_ > 0)
+ memcpy(buf_, buf_st_, length_ * sizeof(buf_[0]));
}
- // Remember how much was allocated to check against that in SetLength().
length_ = storage;
}
void SetLength(size_t length) {
- // length_ stores how much memory was allocated.
- CHECK_LE(length, length_);
+ // capacity() returns how much memory is actually available.
+ CHECK_LE(length, capacity());
length_ = length;
}
void SetLengthAndZeroTerminate(size_t length) {
- // length_ stores how much memory was allocated.
- CHECK_LE(length + 1, length_);
+ // capacity() returns how much memory is actually available.
+ CHECK_LE(length + 1, capacity());
SetLength(length);
// T() is 0 for integer types, nullptr for pointers, etc.
@@ -334,24 +346,35 @@ class MaybeStackBuffer {
}
// Make dereferencing this object return nullptr.
- // Calling this is mutually exclusive with calling
- // AllocateSufficientStorage.
+ // This method can be called multiple times throughout the lifetime of the
+ // buffer, but once this has been called AllocateSufficientStorage() cannot
+ // be used.
void Invalidate() {
- CHECK_EQ(buf_, buf_st_);
+ CHECK(!IsAllocated());
length_ = 0;
buf_ = nullptr;
}
- bool IsAllocated() {
- return buf_ != buf_st_;
+ // If the buffer is stored in the heap rather than on the stack.
+ bool IsAllocated() const {
+ return !IsInvalidated() && buf_ != buf_st_;
+ }
+
+ // If Invalidate() has been called.
+ bool IsInvalidated() const {
+ return buf_ == nullptr;
}
+ // Release ownership of the malloc'd buffer.
+ // Note: This does not free the buffer.
void Release() {
+ CHECK(IsAllocated());
buf_ = buf_st_;
length_ = 0;
+ capacity_ = 0;
}
- MaybeStackBuffer() : length_(0), buf_(buf_st_) {
+ MaybeStackBuffer() : length_(0), capacity_(0), buf_(buf_st_) {
// Default to a zero-length, null-terminated buffer.
buf_[0] = T();
}
@@ -361,12 +384,14 @@ class MaybeStackBuffer {
}
~MaybeStackBuffer() {
- if (buf_ != buf_st_)
+ if (IsAllocated())
free(buf_);
}
private:
size_t length_;
+ // capacity of the malloc'ed buf_
+ size_t capacity_;
T* buf_;
T buf_st_[kStackStorageSize];
};
@@ -394,7 +419,7 @@ class BufferValue : public MaybeStackBuffer {
#define SPREAD_BUFFER_ARG(val, name) \
CHECK((val)->IsUint8Array()); \
- Local<v8::Uint8Array> name = (val).As<v8::Uint8Array>(); \
+ v8::Local<v8::Uint8Array> name = (val).As<v8::Uint8Array>(); \
v8::ArrayBuffer::Contents name##_c = name->Buffer()->GetContents(); \
const size_t name##_offset = name->ByteOffset(); \
const size_t name##_length = name->ByteLength(); \
diff --git a/test/README.md b/test/README.md
index 5ed028a19631d6..949a275792abb0 100644
--- a/test/README.md
+++ b/test/README.md
@@ -1,147 +1,154 @@
-# Table of Contents
-* [Test directories](#test-directories)
-* [Common module API](#common-module-api)
-
-## Test Directories
-
-### abort
-
-Tests for when the `--abort-on-uncaught-exception` flag is used.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-### addons
-
-Tests for [addon](https://nodejs.org/api/addons.html) functionality along with
-some tests that require an addon to function properly.
-
-
-| Runs on CI |
-|:----------:|
-| Yes |
-
-### cctest
-
-C++ test that is run as part of the build process.
-
-| Runs on CI |
-|:----------:|
-| Yes |
-
-### debugger
-
-Tests for [debugger](https://nodejs.org/api/debugger.html) functionality.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-### disabled
-
-Tests that have been disabled from running for various reasons.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-### fixtures
-
-Test fixtures used in various tests throughout the test suite.
-
-### gc
-
-Tests for garbage collection related functionality.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-
-### inspector
+# Node.js Core Tests
-Tests for the V8 inspector integration.
+This folder contains code and data used to test the Node.js implementation.
-| Runs on CI |
-|:----------:|
-| Yes |
+For a detailed guide on how to write tests in this
+directory, see [the guide on writing tests](../doc/guides/writing-tests.md).
-### internet
+On how to run tests in this directory, see
+[the contributing guide](../CONTRIBUTING.md#step-5-test).
-Tests that make real outbound connections (mainly networking related modules).
-Tests for networking related modules may also be present in other directories,
-but those tests do not make outbound connections.
+## Table of Contents
-| Runs on CI |
-|:----------:|
-| No |
-
-### known_issues
-
-Tests reproducing known issues within the system.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-### message
-
-Tests for messages that are output for various conditions (`console.log`,
-error messages etc.)
-
-| Runs on CI |
-|:----------:|
-| Yes |
-
-### parallel
-
-Various tests that are able to be run in parallel.
-
-| Runs on CI |
-|:----------:|
-| Yes |
-
-### pummel
-
-Various tests for various modules / system functionality operating under load.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-### sequential
-
-Various tests that are run sequentially.
-
-| Runs on CI |
-|:----------:|
-| Yes |
-
-### testpy
-
-Test configuration utility used by various test suites.
-
-### tick-processor
-
-Tests for the V8 tick processor integration. The tests are for the logic in
-`lib/internal/v8_prof_processor.js` and `lib/internal/v8_prof_polyfill.js`. The
-tests confirm that the profile processor packages the correct set of scripts
-from V8 and introduces the correct platform specific logic.
-
-| Runs on CI |
-|:----------:|
-| No |
-
-### timers
-
-Tests for [timing utilities](https://nodejs.org/api/timers.html) (`setTimeout`
-and `setInterval`).
+* [Test directories](#test-directories)
+* [Common module API](#common-module-api)
-| Runs on CI |
-|:----------:|
-| No |
+## Test Directories
+
+| Directory        | Runs on CI | Purpose                                                      |
+| ---------------- | :--------: | ------------------------------------------------------------ |
+| `abort`          | No         | Tests for when the `--abort-on-uncaught-exception` flag is used. |
+| `addons`         | Yes        | Tests for [addon](https://nodejs.org/api/addons.html) functionality along with some tests that require an addon to function properly. |
+| `cctest`         | Yes        | C++ test that is run as part of the build process. |
+| `debugger`       | No         | Tests for [debugger](https://nodejs.org/api/debugger.html) functionality. |
+| `disabled`       | No         | Tests that have been disabled from running for various reasons. |
+| `fixtures`       |            | Test fixtures used in various tests throughout the test suite. |
+| `gc`             | No         | Tests for garbage collection related functionality. |
+| `inspector`      | Yes        | Tests for the V8 inspector integration. |
+| `internet`       | No         | Tests that make real outbound connections (mainly networking related modules). Tests for networking related modules may also be present in other directories, but those tests do not make outbound connections. |
+| `known_issues`   | No         | Tests reproducing known issues within the system. |
+| `message`        | Yes        | Tests for messages that are output for various conditions (`console.log`, error messages etc.). |
+| `parallel`       | Yes        | Various tests that are able to be run in parallel. |
+| `pummel`         | No         | Various tests for various modules / system functionality operating under load. |
+| `sequential`     | Yes        | Various tests that are run sequentially. |
+| `testpy`         |            | Test configuration utility used by various test suites. |
+| `tick-processor` | No         | Tests for the V8 tick processor integration. The tests are for the logic in `lib/internal/v8_prof_processor.js` and `lib/internal/v8_prof_polyfill.js`. The tests confirm that the profile processor packages the correct set of scripts from V8 and introduces the correct platform specific logic. |
+| `timers`         | No         | Tests for [timing utilities](https://nodejs.org/api/timers.html) (`setTimeout` and `setInterval`). |
+
## Common module API
@@ -180,6 +187,24 @@ Platform normalizes the `dd` command
Check if there is more than 1gb of total memory.
+### expectsError(settings)
+* `settings` [<Object>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object)
+ with the following optional properties:
+ * `code` [<String>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures#String_type)
+ expected error must have this value for its `code` property
+ * `type` [<Function>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function)
+ expected error must be an instance of `type`
+ * `message` [<String>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures#String_type)
+ or [<RegExp>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp)
+ if a string is provided for `message`, expected error must have it for its
+ `message` property; if a regular expression is provided for `message`, the
+ regular expression must match the `message` property of the expected error
+
+* return function suitable for use as a validation function passed as the second
+ argument to `assert.throws()`
+
+The expected error should be [subclassed by the `internal/errors` module](https://github.com/nodejs/node/blob/master/doc/guides/using-internal-errors.md#api).
+
### expectWarning(name, expected)
* `name` [<String>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures#String_type)
* `expected` [<String>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures#String_type) | [<Array>](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array)
diff --git a/test/cctest/util.cc b/test/cctest/util.cc
index f99a46cdf5353b..a6ece3c6f4d377 100644
--- a/test/cctest/util.cc
+++ b/test/cctest/util.cc
@@ -132,3 +132,127 @@ TEST(UtilTest, UncheckedCalloc) {
TEST_AND_FREE(UncheckedCalloc(0));
TEST_AND_FREE(UncheckedCalloc(1));
}
+
+template <typename T>
+static void MaybeStackBufferBasic() {
+ using node::MaybeStackBuffer;
+
+ MaybeStackBuffer<T> buf;
+ size_t old_length;
+ size_t old_capacity;
+
+ /* Default constructor */
+ EXPECT_EQ(0U, buf.length());
+ EXPECT_FALSE(buf.IsAllocated());
+ EXPECT_GT(buf.capacity(), buf.length());
+
+ /* SetLength() expansion */
+ buf.SetLength(buf.capacity());
+ EXPECT_EQ(buf.capacity(), buf.length());
+ EXPECT_FALSE(buf.IsAllocated());
+
+ /* Means of accessing raw buffer */
+ EXPECT_EQ(buf.out(), *buf);
+ EXPECT_EQ(&buf[0], *buf);
+
+ /* Basic I/O */
+ for (size_t i = 0; i < buf.length(); i++)
+ buf[i] = static_cast<T>(i);
+ for (size_t i = 0; i < buf.length(); i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+
+ /* SetLengthAndZeroTerminate() */
+ buf.SetLengthAndZeroTerminate(buf.capacity() - 1);
+ EXPECT_EQ(buf.capacity() - 1, buf.length());
+ for (size_t i = 0; i < buf.length(); i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+ buf.SetLength(buf.capacity());
+ EXPECT_EQ(0, buf[buf.length() - 1]);
+
+ /* Initial Realloc */
+ old_length = buf.length() - 1;
+ old_capacity = buf.capacity();
+ buf.AllocateSufficientStorage(buf.capacity() * 2);
+ EXPECT_EQ(buf.capacity(), buf.length());
+ EXPECT_TRUE(buf.IsAllocated());
+ for (size_t i = 0; i < old_length; i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+ EXPECT_EQ(0, buf[old_length]);
+
+ /* SetLength() reduction and expansion */
+ for (size_t i = 0; i < buf.length(); i++)
+ buf[i] = static_cast<T>(i);
+ buf.SetLength(10);
+ for (size_t i = 0; i < buf.length(); i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+ buf.SetLength(buf.capacity());
+ for (size_t i = 0; i < buf.length(); i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+
+ /* Subsequent Realloc */
+ old_length = buf.length();
+ old_capacity = buf.capacity();
+ buf.AllocateSufficientStorage(old_capacity * 1.5);
+ EXPECT_EQ(buf.capacity(), buf.length());
+ EXPECT_EQ(static_cast<size_t>(old_capacity * 1.5), buf.length());
+ EXPECT_TRUE(buf.IsAllocated());
+ for (size_t i = 0; i < old_length; i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+
+ /* Basic I/O on Realloc'd buffer */
+ for (size_t i = 0; i < buf.length(); i++)
+ buf[i] = static_cast<T>(i);
+ for (size_t i = 0; i < buf.length(); i++)
+ EXPECT_EQ(static_cast<T>(i), buf[i]);
+
+ /* Release() */
+ T* rawbuf = buf.out();
+ buf.Release();
+ EXPECT_EQ(0U, buf.length());
+ EXPECT_FALSE(buf.IsAllocated());
+ EXPECT_GT(buf.capacity(), buf.length());
+ free(rawbuf);
+}
+
+TEST(UtilTest, MaybeStackBuffer) {
+ using node::MaybeStackBuffer;
+
+ MaybeStackBufferBasic();
+ MaybeStackBufferBasic();
+
+ // Constructor with size parameter
+ {
+ MaybeStackBuffer buf(100);
+ EXPECT_EQ(100U, buf.length());
+ EXPECT_FALSE(buf.IsAllocated());
+ EXPECT_GT(buf.capacity(), buf.length());
+ buf.SetLength(buf.capacity());
+ EXPECT_EQ(buf.capacity(), buf.length());
+ EXPECT_FALSE(buf.IsAllocated());
+ for (size_t i = 0; i < buf.length(); i++)
+ buf[i] = static_cast(i);
+ for (size_t i = 0; i < buf.length(); i++)
+ EXPECT_EQ(static_cast(i), buf[i]);
+
+ MaybeStackBuffer bigbuf(10000);
+ EXPECT_EQ(10000U, bigbuf.length());
+ EXPECT_TRUE(bigbuf.IsAllocated());
+ EXPECT_EQ(bigbuf.length(), bigbuf.capacity());
+ for (size_t i = 0; i < bigbuf.length(); i++)
+ bigbuf[i] = static_cast(i);
+ for (size_t i = 0; i < bigbuf.length(); i++)
+ EXPECT_EQ(static_cast(i), bigbuf[i]);
+ }
+
+ // Invalidated buffer
+ {
+ MaybeStackBuffer buf;
+ buf.Invalidate();
+ EXPECT_TRUE(buf.IsInvalidated());
+ EXPECT_FALSE(buf.IsAllocated());
+ EXPECT_EQ(0U, buf.length());
+ EXPECT_EQ(0U, buf.capacity());
+ buf.Invalidate();
+ EXPECT_TRUE(buf.IsInvalidated());
+ }
+}
diff --git a/test/common.js b/test/common.js
index 830206ea0f93ef..99ea679114c038 100644
--- a/test/common.js
+++ b/test/common.js
@@ -284,6 +284,9 @@ exports.platformTimeout = function(ms) {
if (process.config.target_defaults.default_configuration === 'Debug')
ms = 2 * ms;
+ if (global.__coverage__)
+ ms = 4 * ms;
+
if (exports.isAix)
return 2 * ms; // default localhost speed is slower on AIX
@@ -381,7 +384,11 @@ function leakedGlobals() {
if (!knownGlobals.includes(global[val]))
leaked.push(val);
- return leaked;
+ if (global.__coverage__) {
+ return leaked.filter((varname) => !/^(cov_|__cov)/.test(varname));
+ } else {
+ return leaked;
+ }
}
exports.leakedGlobals = leakedGlobals;
@@ -392,8 +399,7 @@ process.on('exit', function() {
if (!exports.globalCheck) return;
const leaked = leakedGlobals();
if (leaked.length > 0) {
- console.error('Unknown globals: %s', leaked);
- fail('Unknown global found');
+ fail(`Unexpected global(s) found: ${leaked.join(', ')}`);
}
});
@@ -588,3 +594,45 @@ Object.defineProperty(exports, 'hasIntl', {
return process.binding('config').hasIntl;
}
});
+
+// https://github.com/w3c/testharness.js/blob/master/testharness.js
+exports.WPT = {
+ test: (fn, desc) => {
+ try {
+ fn();
+ } catch (err) {
+ if (err instanceof Error)
+ err.message = `In ${desc}:\n ${err.message}`;
+ throw err;
+ }
+ },
+ assert_equals: assert.strictEqual,
+ assert_true: (value, message) => assert.strictEqual(value, true, message),
+ assert_false: (value, message) => assert.strictEqual(value, false, message),
+ assert_throws: (code, func, desc) => {
+ assert.throws(func, (err) => {
+ return typeof err === 'object' && 'name' in err && err.name === code.name;
+ }, desc);
+ },
+ assert_array_equals: assert.deepStrictEqual,
+ assert_unreached(desc) {
+ assert.fail(undefined, undefined, `Reached unreachable code: ${desc}`);
+ }
+};
+
+// Useful for testing expected internal/error objects
+exports.expectsError = function expectsError({code, type, message}) {
+ return function(error) {
+ assert.strictEqual(error.code, code);
+ if (type !== undefined)
+ assert(error instanceof type,
+ `${error} is not the expected type ${type}`);
+ if (message instanceof RegExp) {
+ assert(message.test(error.message),
+ `${error.message} does not match ${message}`);
+ } else if (typeof message === 'string') {
+ assert.strictEqual(error.message, message);
+ }
+ return true;
+ };
+};
diff --git a/test/doctool/test-doctool-html.js b/test/doctool/test-doctool-html.js
index 442381b54d7b72..e119ee86172c25 100644
--- a/test/doctool/test-doctool-html.js
+++ b/test/doctool/test-doctool-html.js
@@ -47,14 +47,25 @@ const testData = [
'Describe Foobar
in more detail here.
' +
'Foobar II# ' +
- 'Added in: v5.3.0, v4.2.0
' +
- 'Describe Foobar II
in more detail here.
' +
+ ' ' +
+ 'Describe Foobar II
in more detail here.' +
+ 'fg(1)
' +
'Deprecated thingy# ' +
' ' +
'Added in: v1.0.0 ' +
'Deprecated since: v2.0.0
Describe ' +
- 'Deprecated thingy
in more detail here.
' +
+ 'Deprecated thingy
in more detail here.' +
+ 'fg(1p) ' +
+ '' +
'Something# ' +
' ' +
diff --git a/test/doctool/test-doctool-json.js b/test/doctool/test-doctool-json.js
index ae7b2007b7d2ef..346a7f331e9d7f 100644
--- a/test/doctool/test-doctool-json.js
+++ b/test/doctool/test-doctool-json.js
@@ -89,7 +89,8 @@ const testData = [
textRaw: 'Foobar',
name: 'foobar',
meta: {
- added: ['v1.0.0']
+ added: ['v1.0.0'],
+ changes: []
},
desc: 'Describe Foobar
in more detail ' +
'here.
\n',
@@ -100,10 +101,17 @@ const testData = [
textRaw: 'Foobar II',
name: 'foobar_ii',
meta: {
- added: ['v5.3.0', 'v4.2.0']
+ added: ['v5.3.0', 'v4.2.0'],
+ changes: [
+ { version: 'v4.2.0',
+ 'pr-url': 'https://github.com/nodejs/node/pull/3276',
+ description: 'The `error` parameter can now be ' +
+ 'an arrow function.'
+ }
+ ]
},
desc: 'Describe Foobar II
in more detail ' +
- 'here.
\n',
+ 'here. fg(1)\n',
type: 'module',
displayName: 'Foobar II'
},
@@ -112,10 +120,11 @@ const testData = [
name: 'deprecated_thingy',
meta: {
added: ['v1.0.0'],
- deprecated: ['v2.0.0']
+ deprecated: ['v2.0.0'],
+ changes: []
},
desc: 'Describe Deprecated thingy
in more ' +
- 'detail here.
\n',
+ 'detail here. fg(1p)\n',
type: 'module',
displayName: 'Deprecated thingy'
},
diff --git a/test/fixtures/doc_with_yaml.md b/test/fixtures/doc_with_yaml.md
index 493c2e7e4268b2..89cf28104e5533 100644
--- a/test/fixtures/doc_with_yaml.md
+++ b/test/fixtures/doc_with_yaml.md
@@ -12,9 +12,13 @@ Describe `Foobar` in more detail here.
added:
- v5.3.0
- v4.2.0
+changes:
+ - version: v4.2.0
+ pr-url: https://github.com/nodejs/node/pull/3276
+ description: The `error` parameter can now be an arrow function.
-->
-Describe `Foobar II` in more detail here.
+Describe `Foobar II` in more detail here. fg(1)
## Deprecated thingy
-Describe `Deprecated thingy` in more detail here.
+Describe `Deprecated thingy` in more detail here. fg(1p)
## Something
diff --git a/test/fixtures/url-setter-tests.json b/test/fixtures/url-setter-tests.json
index 56a1c00a86fd45..4876b9940c1d98 100644
--- a/test/fixtures/url-setter-tests.json
+++ b/test/fixtures/url-setter-tests.json
@@ -1,5 +1,6 @@
{
"comment": [
+ "License: http://www.w3.org/Consortium/Legal/2008/04-testsuite-copyright.html",
"## Tests for setters of https://url.spec.whatwg.org/#urlutils-members",
"",
"This file contains a JSON object.",
@@ -19,8 +20,7 @@
" get the attribute `key` (invoke its getter).",
" The returned string must be equal to `value`.",
"",
- "Note: the 'href' setter is already covered by urltestdata.json.",
- "Source: https://github.com/w3c/web-platform-tests/tree/master/url"
+ "Note: the 'href' setter is already covered by urltestdata.json."
],
"protocol": [
{
@@ -103,7 +103,7 @@
}
},
{
- "comment": "Can’t switch from special scheme to non-special. Note: this may change, see https://github.com/whatwg/url/issues/104",
+ "comment": "Can’t switch from special scheme to non-special",
"href": "http://example.net",
"new_value": "b",
"expected": {
@@ -111,6 +111,22 @@
"protocol": "http:"
}
},
+ {
+ "href": "https://example.net",
+ "new_value": "s",
+ "expected": {
+ "href": "https://example.net/",
+ "protocol": "https:"
+ }
+ },
+ {
+ "href": "ftp://example.net",
+ "new_value": "test",
+ "expected": {
+ "href": "ftp://example.net/",
+ "protocol": "ftp:"
+ }
+ },
{
"comment": "Cannot-be-a-base URL doesn’t have a host, but URL in a special scheme must.",
"href": "mailto:me@example.net",
@@ -121,7 +137,7 @@
}
},
{
- "comment": "Can’t switch from non-special scheme to special. Note: this may change, see https://github.com/whatwg/url/issues/104",
+ "comment": "Can’t switch from non-special scheme to special",
"href": "ssh://me@example.net",
"new_value": "http",
"expected": {
@@ -129,6 +145,30 @@
"protocol": "ssh:"
}
},
+ {
+ "href": "ssh://me@example.net",
+ "new_value": "gopher",
+ "expected": {
+ "href": "ssh://me@example.net/",
+ "protocol": "ssh:"
+ }
+ },
+ {
+ "href": "ssh://me@example.net",
+ "new_value": "file",
+ "expected": {
+ "href": "ssh://me@example.net/",
+ "protocol": "ssh:"
+ }
+ },
+ {
+ "href": "nonsense:///test",
+ "new_value": "https",
+ "expected": {
+ "href": "nonsense:///test",
+ "protocol": "nonsense:"
+ }
+ },
{
"comment": "Stuff after the first ':' is ignored",
"href": "http://example.net",
diff --git a/test/fixtures/url-tests-additional.js b/test/fixtures/url-tests-additional.js
new file mode 100644
index 00000000000000..ffe47fb639dcba
--- /dev/null
+++ b/test/fixtures/url-tests-additional.js
@@ -0,0 +1,6 @@
+'use strict';
+
+// This file contains test cases not part of the WPT
+
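+// Entries are assumed to use the same record shape as url-tests.js, e.g.
+// (hypothetical): { input: 'foo://bar', base: 'about:blank', failure: true }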
+module.exports = [
+];
diff --git a/test/fixtures/url-tests.json b/test/fixtures/url-tests.js
similarity index 82%
rename from test/fixtures/url-tests.json
rename to test/fixtures/url-tests.js
index 634a8e3f663985..3b162391cd2ef5 100644
--- a/test/fixtures/url-tests.json
+++ b/test/fixtures/url-tests.js
@@ -1,7 +1,12 @@
+'use strict';
+
+/* WPT Refs:
+ https://github.com/w3c/web-platform-tests/blob/b207902/url/urltestdata.json
+ License: http://www.w3.org/Consortium/Legal/2008/04-testsuite-copyright.html
+*/
+module.exports =
[
"# Based on http://trac.webkit.org/browser/trunk/LayoutTests/fast/url/script-tests/segments.js",
- "License: http://www.w3.org/Consortium/Legal/2008/04-testsuite-copyright.html",
- "Source: https://github.com/w3c/web-platform-tests/tree/master/url",
{
"input": "http://example\t.\norg",
"base": "http://example.org/foo/bar",
@@ -167,6 +172,20 @@
"search": "?%20d%20",
"hash": "# e"
},
+ {
+ "input": "lolscheme:x x#x x",
+ "base": "about:blank",
+ "href": "lolscheme:x x#x x",
+ "protocol": "lolscheme:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "x x",
+ "search": "",
+ "hash": "#x x"
+ },
{
"input": "http://f:/c",
"base": "http://example.org/foo/bar",
@@ -552,21 +571,21 @@
"search": "",
"hash": ""
},
- {
- "input": "foo://",
- "base": "http://example.org/foo/bar",
- "href": "foo:///",
- "origin": "null",
- "protocol": "foo:",
- "username": "",
- "password": "",
- "host": "",
- "hostname": "",
- "port": "",
- "pathname": "/",
- "search": "",
- "hash": ""
- },
+ // {
+ // "input": "foo://",
+ // "base": "http://example.org/foo/bar",
+ // "href": "foo://",
+ // "origin": "null",
+ // "protocol": "foo:",
+ // "username": "",
+ // "password": "",
+ // "host": "",
+ // "hostname": "",
+ // "port": "",
+ // "pathname": "",
+ // "search": "",
+ // "hash": ""
+ // },
{
"input": "http://a:b@c:29/d",
"base": "http://example.org/foo/bar",
@@ -1021,6 +1040,26 @@
"search": "",
"hash": ""
},
+ // {
+ // "input": "file://example:1/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "file://example:test/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ {
+ "input": "file://example%/",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "file://[example]/",
+ "base": "about:blank",
+ "failure": true
+ },
{
"input": "ftps:/example.com/",
"base": "http://example.org/foo/bar",
@@ -2250,11 +2289,6 @@
"search": "",
"hash": "# %C2%BB"
},
- {
- "input": "http://[www.google.com]/",
- "base": "about:blank",
- "failure": true
- },
{
"input": "http://www.google.com",
"base": "about:blank",
@@ -3555,6 +3589,32 @@
"base": "http://other.com/",
"failure": true
},
+ // "U+FFFD",
+ // {
+ // "input": "https://\ufffd",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "https://%EF%BF%BD",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ {
+ "input": "https://x/\ufffd?\ufffd#\ufffd",
+ "base": "about:blank",
+ "href": "https://x/%EF%BF%BD?%EF%BF%BD#%EF%BF%BD",
+ "origin": "https://x",
+ "protocol": "https:",
+ "username": "",
+ "password": "",
+ "host": "x",
+ "hostname": "x",
+ "port": "",
+ "pathname": "/%EF%BF%BD",
+ "search": "?%EF%BF%BD",
+ "hash": "#%EF%BF%BD"
+ },
"Test name prepping, fullwidth input should be converted to ASCII and NOT IDN-ized. This is 'Go' in fullwidth UTF-8/UTF-16.",
{
"input": "http://Go.com",
@@ -3662,18 +3722,23 @@
"base": "http://other.com/",
"failure": true
},
- "Invalid escaping should trigger the regular host error handling",
+ "Invalid escaping in hosts causes failure",
{
"input": "http://%3g%78%63%30%2e%30%32%35%30%2E.01",
"base": "http://other.com/",
"failure": true
},
- "Something that isn't exactly an IP should get treated as a host and spaces escaped",
+ "A space in a host causes failure",
{
"input": "http://192.168.0.1 hello",
"base": "http://other.com/",
"failure": true
},
+ {
+ "input": "https://x x:12",
+ "base": "about:blank",
+ "failure": true
+ },
"Fullwidth and escaped UTF-8 fullwidth should still be treated as IP",
{
"input": "http://0Xc0.0250.01",
@@ -3691,11 +3756,36 @@
"hash": ""
},
"Broken IPv6",
+ {
+ "input": "http://[www.google.com]/",
+ "base": "about:blank",
+ "failure": true
+ },
{
"input": "http://[google.com]",
"base": "http://other.com/",
"failure": true
},
+ // {
+ // "input": "http://[::1.2.3.4x]",
+ // "base": "http://other.com/",
+ // "failure": true
+ // },
+ // {
+ // "input": "http://[::1.2.3.]",
+ // "base": "http://other.com/",
+ // "failure": true
+ // },
+ // {
+ // "input": "http://[::1.2.]",
+ // "base": "http://other.com/",
+ // "failure": true
+ // },
+ // {
+ // "input": "http://[::1.]",
+ // "base": "http://other.com/",
+ // "failure": true
+ // },
"Misc Unicode",
{
"input": "http://foo:💩@example.com/bar",
@@ -4238,22 +4328,111 @@
"search": "",
"hash": ""
},
- "# unknown schemes and non-ASCII domains",
+ // "# unknown schemes and their hosts",
+ // {
+ // "input": "sc://ñ.test/",
+ // "base": "about:blank",
+ // "href": "sc://%C3%B1.test/",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1.test",
+ // "hostname": "%C3%B1.test",
+ // "port": "",
+ // "pathname": "/",
+ // "search": "",
+ // "hash": ""
+ // },
+ // {
+ // "input": "sc://\u001F!\"$&'()*+,-.;<=>^_`{|}~/",
+ // "base": "about:blank",
+ // "href": "sc://%1F!\"$&'()*+,-.;<=>^_`{|}~/",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%1F!\"$&'()*+,-.;<=>^_`{|}~",
+ // "hostname": "%1F!\"$&'()*+,-.;<=>^_`{|}~",
+ // "port": "",
+ // "pathname": "/",
+ // "search": "",
+ // "hash": ""
+ // },
+ {
+ "input": "sc://\u0000/",
+ "base": "about:blank",
+ "failure": true
+ },
{
- "input": "sc://ñ.test/",
+ "input": "sc:// /",
"base": "about:blank",
- "href": "sc://xn--ida.test/",
- "origin": "null",
- "protocol": "sc:",
- "username": "",
- "password": "",
- "host": "xn--ida.test",
- "hostname": "xn--ida.test",
- "port": "",
- "pathname": "/",
- "search": "",
- "hash": ""
+ "failure": true
},
+ // {
+ // "input": "sc://%/",
+ // "base": "about:blank",
+ // "href": "sc://%/",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%",
+ // "hostname": "%",
+ // "port": "",
+ // "pathname": "/",
+ // "search": "",
+ // "hash": ""
+ // },
+ // {
+ // "input": "sc://@/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "sc://te@s:t@/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "sc://:/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "sc://:12/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ {
+ "input": "sc://[/",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "sc://\\/",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "sc://]/",
+ "base": "about:blank",
+ "failure": true
+ },
+ // {
+ // "input": "x",
+ // "base": "sc://ñ",
+ // "href": "sc://%C3%B1/x",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1",
+ // "hostname": "%C3%B1",
+ // "port": "",
+ // "pathname": "/x",
+ // "search": "",
+ // "hash": ""
+ // },
"# unknown schemes and backslashes",
{
"input": "sc:\\../",
@@ -4286,6 +4465,88 @@
"search": "",
"hash": ""
},
+ "# unknown scheme with bogus percent-encoding",
+ {
+ "input": "wow:%NBD",
+ "base": "about:blank",
+ "href": "wow:%NBD",
+ "origin": "null",
+ "protocol": "wow:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "%NBD",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "wow:%1G",
+ "base": "about:blank",
+ "href": "wow:%1G",
+ "origin": "null",
+ "protocol": "wow:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "%1G",
+ "search": "",
+ "hash": ""
+ },
+ "# Hosts and percent-encoding",
+ // {
+ // "input": "ftp://example.com%80/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "ftp://example.com%A0/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "https://example.com%80/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ // {
+ // "input": "https://example.com%A0/",
+ // "base": "about:blank",
+ // "failure": true
+ // },
+ {
+ "input": "ftp://%e2%98%83",
+ "base": "about:blank",
+ "href": "ftp://xn--n3h/",
+ "origin": "ftp://☃",
+ "protocol": "ftp:",
+ "username": "",
+ "password": "",
+ "host": "xn--n3h",
+ "hostname": "xn--n3h",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "https://%e2%98%83",
+ "base": "about:blank",
+ "href": "https://xn--n3h/",
+ "origin": "https://☃",
+ "protocol": "https:",
+ "username": "",
+ "password": "",
+ "host": "xn--n3h",
+ "hostname": "xn--n3h",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
"# tests from jsdom/whatwg-url designed for code coverage",
{
"input": "http://127.0.0.1:10100/relative_import.html",
@@ -4378,7 +4639,7 @@
"port": "",
"pathname": "/foo/bar",
"search": "??a=b&c=d",
- "searchParams": "%3Fa=b&c=d",
+ // "searchParams": "%3Fa=b&c=d",
"hash": ""
},
"# Scheme only",
@@ -4433,7 +4694,7 @@
"port": "",
"pathname": "/baz",
"search": "?qux",
- "searchParams": "",
+ "searchParams": "qux=",
"hash": "#foo%08bar"
},
"# IPv4 parsing (via https://github.com/nodejs/node/pull/10317)",
@@ -4607,6 +4868,50 @@
"search": "",
"hash": ""
},
+ {
+ "input": "https://0x.0x.0",
+ "base": "about:blank",
+ "href": "https://0.0.0.0/",
+ "origin": "https://0.0.0.0",
+ "protocol": "https:",
+ "username": "",
+ "password": "",
+ "host": "0.0.0.0",
+ "hostname": "0.0.0.0",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ "# file URLs containing percent-encoded Windows drive letters (shouldn't work)",
+ {
+ "input": "file:///C%3A/",
+ "base": "about:blank",
+ "href": "file:///C%3A/",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/C%3A/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "file:///C%7C/",
+ "base": "about:blank",
+ "href": "file:///C%7C/",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/C%7C/",
+ "search": "",
+ "hash": ""
+ },
"# file URLs relative to other file URLs (via https://github.com/jsdom/whatwg-url/pull/60)",
{
"input": "pix/submit.gif",
@@ -4649,5 +4954,673 @@
"pathname": "/",
"search": "",
"hash": ""
+ },
+ "# More file URL tests by zcorpan and annevk",
+ {
+ "input": "/",
+ "base": "file:///C:/a/b",
+ "href": "file:///C:/",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/C:/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "//d:",
+ "base": "file:///C:/a/b",
+ "href": "file:///d:",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/d:",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "//d:/..",
+ "base": "file:///C:/a/b",
+ "href": "file:///d:/",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/d:/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "..",
+ "base": "file:///ab:/",
+ "href": "file:///",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "..",
+ "base": "file:///1:/",
+ "href": "file:///",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "",
+ "base": "file:///test?test#test",
+ "href": "file:///test?test",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/test",
+ "search": "?test",
+ "hash": ""
+ },
+ {
+ "input": "file:",
+ "base": "file:///test?test#test",
+ "href": "file:///test?test",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/test",
+ "search": "?test",
+ "hash": ""
+ },
+ {
+ "input": "?x",
+ "base": "file:///test?test#test",
+ "href": "file:///test?x",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/test",
+ "search": "?x",
+ "hash": ""
+ },
+ {
+ "input": "file:?x",
+ "base": "file:///test?test#test",
+ "href": "file:///test?x",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/test",
+ "search": "?x",
+ "hash": ""
+ },
+ {
+ "input": "#x",
+ "base": "file:///test?test#test",
+ "href": "file:///test?test#x",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/test",
+ "search": "?test",
+ "hash": "#x"
+ },
+ {
+ "input": "file:#x",
+ "base": "file:///test?test#test",
+ "href": "file:///test?test#x",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/test",
+ "search": "?test",
+ "hash": "#x"
+ },
+ "# file URLs without base URL by Rimas Misevičius",
+ {
+ "input": "file:",
+ "base": "about:blank",
+ "href": "file:///",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "file:?q=v",
+ "base": "about:blank",
+ "href": "file:///?q=v",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/",
+ "search": "?q=v",
+ "hash": ""
+ },
+ {
+ "input": "file:#frag",
+ "base": "about:blank",
+ "href": "file:///#frag",
+ "protocol": "file:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": "#frag"
+ },
+ "# IPv6 tests",
+ {
+ "input": "http://[1:0::]",
+ "base": "http://example.net/",
+ "href": "http://[1::]/",
+ "origin": "http://[1::]",
+ "protocol": "http:",
+ "username": "",
+ "password": "",
+ "host": "[1::]",
+ "hostname": "[1::]",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "http://[0:1:2:3:4:5:6:7:8]",
+ "base": "http://example.net/",
+ "failure": true
+ },
+ {
+ "input": "https://[0::0::0]",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "https://[0:.0]",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "https://[0:0:]",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "https://[0:1:2:3:4:5:6:7.0.0.0.1]",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "https://[0:1.00.0.0.0]",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "https://[0:1.290.0.0.0]",
+ "base": "about:blank",
+ "failure": true
+ },
+ {
+ "input": "https://[0:1.23.23]",
+ "base": "about:blank",
+ "failure": true
+ },
+ "# Empty host",
+ {
+ "input": "http://?",
+ "base": "about:blank",
+ "failure": "true"
+ },
+ {
+ "input": "http://#",
+ "base": "about:blank",
+ "failure": "true"
+ },
+ "# Non-special-URL path tests",
+ // {
+ // "input": "sc://ñ",
+ // "base": "about:blank",
+ // "href": "sc://%C3%B1",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1",
+ // "hostname": "%C3%B1",
+ // "port": "",
+ // "pathname": "",
+ // "search": "",
+ // "hash": ""
+ // },
+ // {
+ // "input": "sc://ñ?x",
+ // "base": "about:blank",
+ // "href": "sc://%C3%B1?x",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1",
+ // "hostname": "%C3%B1",
+ // "port": "",
+ // "pathname": "",
+ // "search": "?x",
+ // "hash": ""
+ // },
+ // {
+ // "input": "sc://ñ#x",
+ // "base": "about:blank",
+ // "href": "sc://%C3%B1#x",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1",
+ // "hostname": "%C3%B1",
+ // "port": "",
+ // "pathname": "",
+ // "search": "",
+ // "hash": "#x"
+ // },
+ // {
+ // "input": "#x",
+ // "base": "sc://ñ",
+ // "href": "sc://%C3%B1#x",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1",
+ // "hostname": "%C3%B1",
+ // "port": "",
+ // "pathname": "",
+ // "search": "",
+ // "hash": "#x"
+ // },
+ // {
+ // "input": "?x",
+ // "base": "sc://ñ",
+ // "href": "sc://%C3%B1?x",
+ // "origin": "null",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "%C3%B1",
+ // "hostname": "%C3%B1",
+ // "port": "",
+ // "pathname": "",
+ // "search": "?x",
+ // "hash": ""
+ // },
+ // {
+ // "input": "sc://?",
+ // "base": "about:blank",
+ // "href": "sc://?",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "",
+ // "hostname": "",
+ // "port": "",
+ // "pathname": "",
+ // "search": "",
+ // "hash": ""
+ // },
+ // {
+ // "input": "sc://#",
+ // "base": "about:blank",
+ // "href": "sc://#",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "",
+ // "hostname": "",
+ // "port": "",
+ // "pathname": "",
+ // "search": "",
+ // "hash": ""
+ // },
+ {
+ "input": "///",
+ "base": "sc://x/",
+ "href": "sc:///",
+ "protocol": "sc:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ // {
+ // "input": "////",
+ // "base": "sc://x/",
+ // "href": "sc:////",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "",
+ // "hostname": "",
+ // "port": "",
+ // "pathname": "//",
+ // "search": "",
+ // "hash": ""
+ // },
+ // {
+ // "input": "////x/",
+ // "base": "sc://x/",
+ // "href": "sc:////x/",
+ // "protocol": "sc:",
+ // "username": "",
+ // "password": "",
+ // "host": "",
+ // "hostname": "",
+ // "port": "",
+ // "pathname": "//x/",
+ // "search": "",
+ // "hash": ""
+ // },
+ {
+ "input": "tftp://foobar.com/someconfig;mode=netascii",
+ "base": "about:blank",
+ "href": "tftp://foobar.com/someconfig;mode=netascii",
+ "origin": "null",
+ "protocol": "tftp:",
+ "username": "",
+ "password": "",
+ "host": "foobar.com",
+ "hostname": "foobar.com",
+ "port": "",
+ "pathname": "/someconfig;mode=netascii",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "telnet://user:pass@foobar.com:23/",
+ "base": "about:blank",
+ "href": "telnet://user:pass@foobar.com:23/",
+ "origin": "null",
+ "protocol": "telnet:",
+ "username": "user",
+ "password": "pass",
+ "host": "foobar.com:23",
+ "hostname": "foobar.com",
+ "port": "23",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "ut2004://10.10.10.10:7777/Index.ut2",
+ "base": "about:blank",
+ "href": "ut2004://10.10.10.10:7777/Index.ut2",
+ "origin": "null",
+ "protocol": "ut2004:",
+ "username": "",
+ "password": "",
+ "host": "10.10.10.10:7777",
+ "hostname": "10.10.10.10",
+ "port": "7777",
+ "pathname": "/Index.ut2",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "redis://foo:bar@somehost:6379/0?baz=bam&qux=baz",
+ "base": "about:blank",
+ "href": "redis://foo:bar@somehost:6379/0?baz=bam&qux=baz",
+ "origin": "null",
+ "protocol": "redis:",
+ "username": "foo",
+ "password": "bar",
+ "host": "somehost:6379",
+ "hostname": "somehost",
+ "port": "6379",
+ "pathname": "/0",
+ "search": "?baz=bam&qux=baz",
+ "hash": ""
+ },
+ {
+ "input": "rsync://foo@host:911/sup",
+ "base": "about:blank",
+ "href": "rsync://foo@host:911/sup",
+ "origin": "null",
+ "protocol": "rsync:",
+ "username": "foo",
+ "password": "",
+ "host": "host:911",
+ "hostname": "host",
+ "port": "911",
+ "pathname": "/sup",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "git://github.com/foo/bar.git",
+ "base": "about:blank",
+ "href": "git://github.com/foo/bar.git",
+ "origin": "null",
+ "protocol": "git:",
+ "username": "",
+ "password": "",
+ "host": "github.com",
+ "hostname": "github.com",
+ "port": "",
+ "pathname": "/foo/bar.git",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "irc://myserver.com:6999/channel?passwd",
+ "base": "about:blank",
+ "href": "irc://myserver.com:6999/channel?passwd",
+ "origin": "null",
+ "protocol": "irc:",
+ "username": "",
+ "password": "",
+ "host": "myserver.com:6999",
+ "hostname": "myserver.com",
+ "port": "6999",
+ "pathname": "/channel",
+ "search": "?passwd",
+ "hash": ""
+ },
+ {
+ "input": "dns://fw.example.org:9999/foo.bar.org?type=TXT",
+ "base": "about:blank",
+ "href": "dns://fw.example.org:9999/foo.bar.org?type=TXT",
+ "origin": "null",
+ "protocol": "dns:",
+ "username": "",
+ "password": "",
+ "host": "fw.example.org:9999",
+ "hostname": "fw.example.org",
+ "port": "9999",
+ "pathname": "/foo.bar.org",
+ "search": "?type=TXT",
+ "hash": ""
+ },
+ {
+ "input": "ldap://localhost:389/ou=People,o=JNDITutorial",
+ "base": "about:blank",
+ "href": "ldap://localhost:389/ou=People,o=JNDITutorial",
+ "origin": "null",
+ "protocol": "ldap:",
+ "username": "",
+ "password": "",
+ "host": "localhost:389",
+ "hostname": "localhost",
+ "port": "389",
+ "pathname": "/ou=People,o=JNDITutorial",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "git+https://github.com/foo/bar",
+ "base": "about:blank",
+ "href": "git+https://github.com/foo/bar",
+ "origin": "null",
+ "protocol": "git+https:",
+ "username": "",
+ "password": "",
+ "host": "github.com",
+ "hostname": "github.com",
+ "port": "",
+ "pathname": "/foo/bar",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "urn:ietf:rfc:2648",
+ "base": "about:blank",
+ "href": "urn:ietf:rfc:2648",
+ "origin": "null",
+ "protocol": "urn:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "ietf:rfc:2648",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "tag:joe@example.org,2001:foo/bar",
+ "base": "about:blank",
+ "href": "tag:joe@example.org,2001:foo/bar",
+ "origin": "null",
+ "protocol": "tag:",
+ "username": "",
+ "password": "",
+ "host": "",
+ "hostname": "",
+ "port": "",
+ "pathname": "joe@example.org,2001:foo/bar",
+ "search": "",
+ "hash": ""
+ },
+ "# percent encoded hosts in non-special-URLs",
+ // {
+ // "input": "non-special://%E2%80%A0/",
+ // "base": "about:blank",
+ // "href": "non-special://%E2%80%A0/",
+ // "protocol": "non-special:",
+ // "username": "",
+ // "password": "",
+ // "host": "%E2%80%A0",
+ // "hostname": "%E2%80%A0",
+ // "port": "",
+ // "pathname": "/",
+ // "search": "",
+ // "hash": ""
+ // },
+ // {
+ // "input": "non-special://H%4fSt/path",
+ // "base": "about:blank",
+ // "href": "non-special://H%4fSt/path",
+ // "protocol": "non-special:",
+ // "username": "",
+ // "password": "",
+ // "host": "H%4fSt",
+ // "hostname": "H%4fSt",
+ // "port": "",
+ // "pathname": "/path",
+ // "search": "",
+ // "hash": ""
+ // },
+ "# IPv6 in non-special-URLs",
+ {
+ "input": "non-special://[1:2:0:0:5:0:0:0]/",
+ "base": "about:blank",
+ "href": "non-special://[1:2:0:0:5::]/",
+ "protocol": "non-special:",
+ "username": "",
+ "password": "",
+ "host": "[1:2:0:0:5::]",
+ "hostname": "[1:2:0:0:5::]",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "non-special://[1:2:0:0:0:0:0:3]/",
+ "base": "about:blank",
+ "href": "non-special://[1:2::3]/",
+ "protocol": "non-special:",
+ "username": "",
+ "password": "",
+ "host": "[1:2::3]",
+ "hostname": "[1:2::3]",
+ "port": "",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "non-special://[1:2::3]:80/",
+ "base": "about:blank",
+ "href": "non-special://[1:2::3]:80/",
+ "protocol": "non-special:",
+ "username": "",
+ "password": "",
+ "host": "[1:2::3]:80",
+ "hostname": "[1:2::3]",
+ "port": "80",
+ "pathname": "/",
+ "search": "",
+ "hash": ""
+ },
+ {
+ "input": "non-special://[:80/",
+ "base": "about:blank",
+ "failure": true
}
]
diff --git a/test/internet/test-dgram-multicast-multi-process.js b/test/internet/test-dgram-multicast-multi-process.js
index 46f47a5982977e..793d22bd4c137e 100644
--- a/test/internet/test-dgram-multicast-multi-process.js
+++ b/test/internet/test-dgram-multicast-multi-process.js
@@ -22,14 +22,14 @@ if (common.inFreeBSDJail) {
return;
}
-function launchChildProcess(index) {
+function launchChildProcess() {
const worker = fork(__filename, ['child']);
workers[worker.pid] = worker;
worker.messagesReceived = [];
// Handle the death of workers.
- worker.on('exit', function(code, signal) {
+ worker.on('exit', function(code) {
// Don't consider this the true death if the worker has finished
// successfully or if the exit code is 0.
if (worker.isDone || code === 0) {
@@ -189,7 +189,7 @@ if (process.argv[2] === 'child') {
process.send({ message: buf.toString() });
- if (receivedMessages.length == messages.length) {
+ if (receivedMessages.length === messages.length) {
// .dropMembership() not strictly needed but here as a sanity check.
listenSocket.dropMembership(LOCAL_BROADCAST_HOST);
process.nextTick(function() {
diff --git a/test/known_issues/test-url-parse-conformance.js b/test/known_issues/test-url-parse-conformance.js
index f7fcec821c8e50..62c36da87e6678 100644
--- a/test/known_issues/test-url-parse-conformance.js
+++ b/test/known_issues/test-url-parse-conformance.js
@@ -7,7 +7,7 @@ const url = require('url');
const assert = require('assert');
const path = require('path');
-const tests = require(path.join(common.fixturesDir, 'url-tests.json'));
+const tests = require(path.join(common.fixturesDir, 'url-tests'));
let failed = 0;
let attempted = 0;
diff --git a/test/message/eval_messages.out b/test/message/eval_messages.out
index 44965be374bac5..ba0c35431c6907 100644
--- a/test/message/eval_messages.out
+++ b/test/message/eval_messages.out
@@ -3,7 +3,8 @@
with(this){__filename}
^^^^
SyntaxError: Strict mode code may not include a with statement
- at Object.exports.runInThisContext (vm.js:*)
+ at createScript (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.<anonymous> ([eval]-wrapper:*:*)
at Module._compile (module.js:*:*)
at Immediate.<anonymous> (bootstrap_node.js:*:*)
@@ -15,10 +16,11 @@ SyntaxError: Strict mode code may not include a with statement
[eval]:1
throw new Error("hello")
^
+
Error: hello
at [eval]:1:7
at ContextifyScript.Script.runInThisContext (vm.js:*)
- at Object.exports.runInThisContext (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.<anonymous> ([eval]-wrapper:*:*)
at Module._compile (module.js:*:*)
at Immediate.<anonymous> (bootstrap_node.js:*:*)
@@ -28,10 +30,11 @@ Error: hello
[eval]:1
throw new Error("hello")
^
+
Error: hello
at [eval]:1:7
at ContextifyScript.Script.runInThisContext (vm.js:*)
- at Object.exports.runInThisContext (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.<anonymous> ([eval]-wrapper:*:*)
at Module._compile (module.js:*:*)
at Immediate.<anonymous> (bootstrap_node.js:*:*)
@@ -42,20 +45,23 @@ Error: hello
[eval]:1
var x = 100; y = x;
^
+
ReferenceError: y is not defined
at [eval]:1:16
at ContextifyScript.Script.runInThisContext (vm.js:*)
- at Object.exports.runInThisContext (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.<anonymous> ([eval]-wrapper:*:*)
at Module._compile (module.js:*:*)
at Immediate.<anonymous> (bootstrap_node.js:*:*)
at runCallback (timers.js:*:*)
at tryOnImmediate (timers.js:*:*)
at processImmediate [as _immediateCallback] (timers.js:*:*)
+
[eval]:1
var ______________________________________________; throw 10
^
10
+
[eval]:1
var ______________________________________________; throw 10
^
diff --git a/test/message/stdin_messages.out b/test/message/stdin_messages.out
index 828bee92cb6f7f..b4c51d7ad567f0 100644
--- a/test/message/stdin_messages.out
+++ b/test/message/stdin_messages.out
@@ -1,10 +1,10 @@
[stdin]
-
[stdin]:1
with(this){__filename}
^^^^
SyntaxError: Strict mode code may not include a with statement
- at Object.exports.runInThisContext (vm.js:*)
+ at createScript (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.<anonymous> ([stdin]-wrapper:*:*)
at Module._compile (module.js:*:*)
at Immediate.<anonymous> (bootstrap_node.js:*:*)
@@ -13,28 +13,28 @@ SyntaxError: Strict mode code may not include a with statement
at processImmediate [as _immediateCallback] (timers.js:*:*)
42
42
-
[stdin]:1
throw new Error("hello")
^
+
Error: hello
- at [stdin]:1:*
+ at [stdin]:1:7
at ContextifyScript.Script.runInThisContext (vm.js:*)
- at Object.exports.runInThisContext (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.<anonymous> ([stdin]-wrapper:*:*)
at Module._compile (module.js:*:*)
at Immediate.<anonymous> (bootstrap_node.js:*:*)
at runCallback (timers.js:*:*)
at tryOnImmediate (timers.js:*:*)
at processImmediate [as _immediateCallback] (timers.js:*:*)
-
[stdin]:1
throw new Error("hello")
^
+
Error: hello
at [stdin]:1:*
at ContextifyScript.Script.runInThisContext (vm.js:*)
- at Object.exports.runInThisContext (vm.js:*)
+ at Object.runInThisContext (vm.js:*)
at Object.