From 27764823b5def4d82dbf3d31077fa71c604f01db Mon Sep 17 00:00:00 2001
From: Hai
Date: Sun, 10 Aug 2025 22:00:30 +0800
Subject: [PATCH] refactor: benchmark

---
 package.json                          |    3 +-
 test/BenchmarkTestCases.benchmark.mjs | 1044 +++++------
 .../_helpers/benchmark.worker.mjs     |  604 ++++++++++
 yarn.lock                             |    2 +-
 4 files changed, 923 insertions(+), 730 deletions(-)
 create mode 100644 test/benchmarkCases/_helpers/benchmark.worker.mjs

diff --git a/package.json b/package.json
index 32d7a2210..933afdc02 100644
--- a/package.json
+++ b/package.json
@@ -50,7 +50,7 @@
 		"fix:yarn": "yarn-deduplicate -s highest yarn.lock",
 		"fix:special": "node node_modules/tooling/inherit-types --write && node node_modules/tooling/format-schemas --write && node tooling/generate-runtime-code.js --write && node tooling/generate-wasm-code.js --write && node node_modules/tooling/compile-to-definitions --write && node node_modules/tooling/precompile-schemas --write && node node_modules/tooling/generate-types --no-template-literals --write",
 		"build:examples": "cd examples && node buildAll.js",
-		"benchmark": "node --max-old-space-size=4096 --experimental-vm-modules --trace-deprecation --hash-seed=1 --random-seed=1 --no-opt --predictable --predictable-gc-schedule --interpreted-frames-native-stack --allow-natives-syntax --expose-gc --no-concurrent-sweeping ./test/BenchmarkTestCases.benchmark.mjs",
+		"benchmark": "node --max-old-space-size=4096 --experimental-vm-modules --expose-gc ./test/BenchmarkTestCases.benchmark.mjs",
 		"pretest": "yarn lint",
 		"test": "yarn test:base",
 		"test:update-snapshots": "yarn test:base -u",
@@ -152,6 +152,7 @@
 		"jest-diff": "^30.0.5",
 		"jest-environment-node": "^30.0.5",
 		"jest-junit": "^16.0.0",
+		"jest-worker": "^30.0.5",
 		"json-loader": "^0.5.7",
 		"json5": "^2.1.3",
 		"less": "^4.0.0",
diff --git a/test/BenchmarkTestCases.benchmark.mjs b/test/BenchmarkTestCases.benchmark.mjs
index 0fa9b6932..c215ae071 100644
--- a/test/BenchmarkTestCases.benchmark.mjs
+++ b/test/BenchmarkTestCases.benchmark.mjs
@@ -1,10 +1,41 @@
-import { constants, writeFile } from "fs";
-import fs from "fs/promises";
-import { Session } from "inspector";
+import fs from "fs";
+import fsp from "fs/promises";
+import os from "os";
 import path from "path";
 import { fileURLToPath, pathToFileURL } from "url";
+import { Worker } from "jest-worker";
 import { simpleGit } from "simple-git";
-import { Bench, hrtimeNow } from "tinybench";
+
+/** @typedef {import("./benchmarkCases/_helpers/benchmark.worker.mjs").BenchmarkResult} BenchmarkResult */
+/** @typedef {import("./benchmarkCases/_helpers/benchmark.worker.mjs").Result} Result */
+
+/**
+ * @typedef {object} BaselineRev
+ * @property {string} name baseline rev name
+ * @property {string} rev baseline revision
+ */
+
+/**
+ * @typedef {object} Baseline
+ * @property {string} name baseline rev name
+ * @property {string=} rev baseline revision
+ * @property {string} path baseline path
+ */
+
+/**
+ * @typedef {object} BenchmarkScenario
+ * @property {string} name scenario name
+ * @property {"development" | "production"} mode mode
+ * @property {boolean=} watch watch mode
+ */
+
+/**
+ * @typedef {object} BenchmarkTask
+ * @property {string} id task id (includes benchmark name and scenario name)
+ * @property {string} benchmark benchmark name
+ * @property {BenchmarkScenario} scenario scenario
+ * @property {Baseline[]} baselines baselines
+ */

 const __dirname = path.dirname(fileURLToPath(import.meta.url));
 const rootPath = path.join(__dirname, "..");
@@ -12,6 +43,10 @@ const git =
simpleGit(rootPath); const REV_LIST_REGEXP = /^([a-f0-9]+)\s*([a-f0-9]+)\s*([a-f0-9]+)?\s*$/; +/** + * Gets V8 flags based on Node.js version + * @returns {string[]} Array of V8 flags + */ const getV8Flags = () => { const nodeVersionMajor = Number.parseInt( process.version.slice(1).split(".")[0], @@ -38,25 +73,13 @@ const getV8Flags = () => { return flags; }; -const checkV8Flags = () => { - const requiredFlags = getV8Flags(); - const actualFlags = process.execArgv; - const missingFlags = requiredFlags.filter( - (flag) => !actualFlags.includes(flag) - ); - if (missingFlags.length > 0) { - console.warn(`Missing required flags: ${missingFlags.join(", ")}`); - } -}; - -checkV8Flags(); - +/** @type {boolean} */ const LAST_COMMIT = typeof process.env.LAST_COMMIT !== "undefined"; -const GENERATE_PROFILE = typeof process.env.PROFILE !== "undefined"; /** - * @param {(string | undefined)[]} revList rev list - * @returns {Promise} head + * Gets the HEAD commit + * @param {(string | undefined)[]} revList Revision list from git + * @returns {Promise} HEAD commit hash */ async function getHead(revList) { if (typeof process.env.HEAD !== "undefined") { @@ -73,9 +96,10 @@ async function getHead(revList) { } /** - * @param {string} head head - * @param {(string | undefined)[]} revList rev list - * @returns {Promise} base + * Gets the BASE commit + * @param {string} head HEAD commit hash + * @param {(string | undefined)[]} revList Revision list from git + * @returns {Promise} BASE commit hash */ async function getBase(head, revList) { if (typeof process.env.BASE !== "undefined") { @@ -114,7 +138,8 @@ async function getBase(head, revList) { } /** - * @returns {Promise<{name: string, rev: string}[]>} baseline revs + * Gets baseline revisions for benchmarking + * @returns {Promise} Array of baseline revisions */ async function getBaselineRevs() { if (LAST_COMMIT) { @@ -156,728 +181,291 @@ async function getBaselineRevs() { } /** - * @param {number} n number of runs - * @returns {number} distribution + * Splits array into N chunks + * @template T + * @param {T[]} array Input array + * @param {number} n Number of chunks + * @returns {T[][]} Array of chunks */ -function tDistribution(n) { - // two-sided, 90% - // https://en.wikipedia.org/wiki/Student%27s_t-distribution - if (n <= 30) { - // 1 2 ... 
- const data = [ - 6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.86, 1.833, 1.812, 1.796, - 1.782, 1.771, 1.761, 1.753, 1.746, 1.74, 1.734, 1.729, 1.725, 1.721, - 1.717, 1.714, 1.711, 1.708, 1.706, 1.703, 1.701, 1.699, 1.697 - ]; - return data[n - 1]; - } else if (n <= 120) { - // 30 40 50 60 70 80 90 100 110 120 - const data = [ - 1.697, 1.684, 1.676, 1.671, 1.667, 1.664, 1.662, 1.66, 1.659, 1.658 - ]; - const a = data[Math.floor(n / 10) - 3]; - const b = data[Math.ceil(n / 10) - 3]; - const f = n / 10 - Math.floor(n / 10); - - return a * (1 - f) + b * f; - } - - return 1.645; -} - -const output = path.join(__dirname, "js"); -const baselinesPath = path.join(output, "benchmark-baselines"); - -const baselineRevisions = await getBaselineRevs(); - -try { - await fs.mkdir(baselinesPath, { recursive: true }); -} catch (_err) {} // eslint-disable-line no-empty - -const baselines = []; - -for (const baselineInfo of baselineRevisions) { - /** - * @returns {void} - */ - function addBaseline() { - baselines.push({ - name: baselineInfo.name, - rev: baselineRevision, - webpack: async () => { - const webpack = ( - await import( - pathToFileURL(path.resolve(baselinePath, "./lib/index.js")) - ) - ).default; - - return webpack; - } - }); - } - - const baselineRevision = baselineInfo.rev; - const baselinePath = - baselineRevision === undefined - ? path.resolve(__dirname, "../") - : path.resolve(baselinesPath, baselineRevision); - - try { - await fs.access(path.resolve(baselinePath, ".git"), constants.R_OK); - } catch (_err) { - try { - await fs.mkdir(baselinePath); - } catch (_err) {} // eslint-disable-line no-empty - - const gitIndex = path.resolve(rootPath, ".git/index"); - const index = await fs.readFile(gitIndex); - const prevHead = await git.raw(["rev-list", "-n", "1", "HEAD"]); - - await simpleGit(baselinePath).raw([ - "--git-dir", - path.join(rootPath, ".git"), - "reset", - "--hard", - baselineRevision - ]); - - await git.raw(["reset", "--soft", prevHead.split("\n")[0]]); - await fs.writeFile(gitIndex, index); - } finally { - addBaseline(); - } -} - -function buildConfiguration( - test, - baseline, - realConfig, - scenario, - testDirectory -) { - const { watch, ...rest } = scenario; - const config = structuredClone({ ...realConfig, ...rest }); - - config.entry = path.resolve( - testDirectory, - config.entry - ? /\.(c|m)?js$/.test(config.entry) - ? 
config.entry - : `${config.entry}.js` - : "./index.js" - ); - config.devtool = config.devtool || false; - config.name = `${test}-${baseline.name}-${scenario.name}`; - config.context = testDirectory; - config.performance = false; - config.output = config.output || {}; - config.output.path = path.join( - baseOutputPath, - test, - `scenario-${scenario.name}`, - `baseline-${baseline.name}` - ); - config.plugins = config.plugins || []; - if (config.cache) { - config.cache.cacheDirectory = path.resolve(config.output.path, ".cache"); - } - if (watch) { - config.cache = { - type: "memory", - maxGenerations: 1 - }; - } - return config; -} - -// Filename sanitization -function sanitizeFilename(filename) { - // Replace invalid filesystem characters with underscores - return filename - .replace(/[<>:"/\\|?*]/g, "_") - .replace(/[\u0000-\u001F\u0080-\u009F]/g, "_") - .replace(/^\.+/, "_") - .replace(/\.+$/, "_") - .replace(/\s+/g, "_") - .slice(0, 200); // Limit filename length -} - -async function withProfiling(name, fn) { - // Ensure the profiles directory exists - await fs.mkdir(path.join(baseOutputPath, "profiles"), { recursive: true }); - - const session = new Session(); - session.connect(); - - // Enable and start profiling - await new Promise((resolve, reject) => { - session.post("Profiler.enable", (err) => { - if (err) return reject(err); - session.post("Profiler.start", (err) => { - if (err) return reject(err); - resolve(); - }); - }); - }); - - // Run the benchmarked function - // No need to `console.time`, it'll be included in the - // CPU Profile. - await fn(); - - // Stop profiling and get the CPU profile - const profile = await new Promise((resolve, reject) => { - session.post("Profiler.stop", (err, { profile }) => { - if (err) return reject(err); - resolve(profile); - }); - }); - - session.disconnect(); - - const outputFile = `${sanitizeFilename(name)}-${Date.now()}.cpuprofile`; - - await fs.writeFile( - path.join(baseOutputPath, "profiles", outputFile), - JSON.stringify(profile), - "utf8" - ); - - console.log(`CPU profile saved to ${outputFile}`); -} - -function runWebpack(webpack, config) { - return new Promise((resolve, reject) => { - const compiler = webpack(config); - compiler.run((err, stats) => { - if (err) return reject(err); - if (stats && (stats.hasWarnings() || stats.hasErrors())) { - return reject(new Error(stats.toString())); - } - - compiler.close((closeErr) => { - if (closeErr) return reject(closeErr); - if (stats) stats.toString(); // Force stats computation - resolve(); - }); - }); - }); -} - -async function runWatch(webpack, config, callback) { - const compiler = webpack(config); - return compiler.watch({}, callback); -} - -const scenarios = [ - { - name: "mode-development", - mode: "development" - }, - { - name: "mode-development-rebuild", - mode: "development", - watch: true - }, - { - name: "mode-production", - mode: "production" - } -]; - -const baseOutputPath = path.join(__dirname, "js", "benchmark"); - -const withCodSpeed = async (/** @type {import("tinybench").Bench} */ bench) => { - const { Measurement, getGitDir, mongoMeasurement, setupCore, teardownCore } = - await import("@codspeed/core"); - - if (!Measurement.isInstrumented()) { - const rawRun = bench.run; - bench.run = async () => { - console.warn( - `[CodSpeed] ${bench.tasks.length} benches detected but no instrumentation found, falling back to tinybench` - ); - return await rawRun.bind(bench)(); - }; - return bench; - } - - const getStackTrace = (belowFn) => { - const oldLimit = Error.stackTraceLimit; - 
Error.stackTraceLimit = Infinity; - const dummyObject = {}; - const v8Handler = Error.prepareStackTrace; - Error.prepareStackTrace = (dummyObject, v8StackTrace) => v8StackTrace; - Error.captureStackTrace(dummyObject, belowFn || getStackTrace); - const v8StackTrace = dummyObject.stack; - Error.prepareStackTrace = v8Handler; - Error.stackTraceLimit = oldLimit; - return v8StackTrace; - }; - - const getCallingFile = () => { - const stack = getStackTrace(); - let callingFile = stack[2].getFileName(); // [here, withCodSpeed, actual caller] - const gitDir = getGitDir(callingFile); - if (gitDir === undefined) { - throw new Error("Could not find a git repository"); - } - if (callingFile.startsWith("file://")) { - callingFile = fileURLToPath(callingFile); - } - return path.relative(gitDir, callingFile); - }; - - const rawAdd = bench.add; - bench.add = (name, fn, opts) => { - const callingFile = getCallingFile(); - const uri = `${callingFile}::${name}`; - const options = { ...opts, uri }; - return rawAdd.bind(bench)(name, fn, options); - }; - const rootCallingFile = getCallingFile(); - bench.run = async function run() { - const iterations = bench.opts.iterations - 1; - console.log("[CodSpeed] running"); - setupCore(); - for (const task of bench.tasks) { - await bench.opts.setup?.(task, "run"); - await task.fnOpts.beforeAll?.call(task); - const samples = []; - async function iteration() { - try { - await task.fnOpts.beforeEach?.call(task, "run"); - const start = bench.opts.now(); - await task.fn(); - samples.push(bench.opts.now() - start || 0); - await task.fnOpts.afterEach?.call(this, "run"); - } catch (err) { - if (bench.opts.throws) { - throw err; - } - } - } - while (samples.length < iterations) { - await iteration(); - } - // Codspeed Measure - const uri = - task.opts && "uri" in task.options - ? 
task.opts.uri - : `${rootCallingFile}::${task.name}`; - await task.fnOpts.beforeEach?.call(task); - await mongoMeasurement.start(uri); - await (async function __codspeed_root_frame__() { - Measurement.startInstrumentation(); - await task.fn(); - Measurement.stopInstrumentation(uri); - })(); - await mongoMeasurement.stop(uri); - await task.fnOpts.afterEach?.call(task); - console.log(`[Codspeed] ✔ Measured ${uri}`); - await task.fnOpts.afterAll?.call(task); - - await bench.opts.teardown?.(task, "run"); - task.processRunResult({ latencySamples: samples }); - } - teardownCore(); - console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); - return bench.tasks; - }; - return bench; -}; - -const bench = await withCodSpeed( - new Bench({ - now: hrtimeNow, - throws: true, - warmup: true, - warmupIterations: 2, - iterations: 8, - setup(task, mode) { - global.gc(); - console.log(`Setup (${mode} mode): ${task.name}`); - }, - teardown(task, mode) { - console.log(`Teardown (${mode} mode): ${task.name}`); - } - }) -); - -async function registerSuite(bench, test, baselines) { - const testDirectory = path.join(casesPath, test); - const optionsPath = path.resolve(testDirectory, "options.mjs"); - - let options = {}; - - try { - options = await import(`${pathToFileURL(optionsPath)}`); - } catch (_err) { - // Ignore - } - - if (typeof options.setup !== "undefined") { - await options.setup(); - } - - if (test.includes("-unit")) { - const fullBenchName = `unit benchmark "${test}"`; - - console.log(`Register: ${fullBenchName}`); - - const benchmarkPath = path.resolve(testDirectory, "index.bench.mjs"); - const registerBenchmarks = await import(`${pathToFileURL(benchmarkPath)}`); - - registerBenchmarks.default(bench); - - return; - } - - const realConfig = ( - await import( - `${pathToFileURL(path.join(testDirectory, "webpack.config.js"))}` - ) - ).default; - - await Promise.all( - baselines.map(async (baseline) => { - const webpack = await baseline.webpack(); - - await Promise.all( - scenarios.map(async (scenario) => { - const config = buildConfiguration( - test, - baseline, - realConfig, - scenario, - testDirectory - ); - - const stringifiedScenario = JSON.stringify(scenario); - const benchName = `benchmark "${test}", scenario '${stringifiedScenario}'${LAST_COMMIT ? "" : ` ${baseline.name} (${baseline.rev})`}`; - const fullBenchName = `benchmark "${test}", scenario '${stringifiedScenario}' ${baseline.name} ${baseline.rev ? 
`(${baseline.rev})` : ""}`; - - console.log(`Register: ${fullBenchName}`); - - if (scenario.watch) { - const entry = path.resolve(config.entry); - const originalEntryContent = await fs.readFile(entry, "utf8"); - - let watching; - let next; - - const watchCallback = (err, stats) => { - if (next) { - next(err, stats); - } - }; - - bench.add( - benchName, - async () => { - console.time(`Time: ${benchName}`); - - let resolve; - let reject; - - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - - next = (err, stats) => { - if (err) { - reject(err); - return; - } - - if (stats.hasWarnings() || stats.hasErrors()) { - reject(new Error(stats.toString())); - return; - } - - // Construct and print stats to be more accurate with real life projects - stats.toString(); - resolve(); - console.timeEnd(`Time: ${benchName}`); - }; - - await new Promise((resolve, reject) => { - writeFile( - entry, - `${originalEntryContent};console.log('watch test')`, - (err) => { - if (err) { - reject(err); - return; - } - - resolve(); - } - ); - }); - - await promise; - }, - { - async beforeAll() { - this.collectBy = `${test}, scenario '${stringifiedScenario}'`; - - let resolve; - let reject; - - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - - next = (err, stats) => { - if (err) { - reject(err); - return; - } - - if (stats.hasWarnings() || stats.hasErrors()) { - reject(new Error(stats.toString())); - return; - } - - // Construct and print stats to be more accurate with real life projects - stats.toString(); - resolve(); - }; - - if (GENERATE_PROFILE) { - await withProfiling( - benchName, - async () => - (watching = await runWatch( - webpack, - config, - watchCallback - )) - ); - } else { - watching = await runWatch(webpack, config, watchCallback); - } - - // Make an extra fs call to warn up filesystem caches - // Also wait a first run callback - await new Promise((resolve, reject) => { - writeFile( - entry, - `${originalEntryContent};console.log('watch test')`, - (err) => { - if (err) { - reject(err); - return; - } - - resolve(); - } - ); - }); - - await promise; - }, - async afterAll() { - // Close watching - await new Promise((resolve, reject) => { - if (watching) { - watching.close((closeErr) => { - if (closeErr) { - reject(closeErr); - return; - } - - resolve(); - }); - } - }); - - // Write original content - await new Promise((resolve, reject) => { - writeFile(entry, originalEntryContent, (err) => { - if (err) { - reject(err); - return; - } - - resolve(); - }); - }); - } - } - ); - } else { - bench.add( - benchName, - async () => { - if (GENERATE_PROFILE) { - await withProfiling(benchName, () => - runWebpack(webpack, config) - ); - } else { - console.time(`Time: ${benchName}`); - await runWebpack(webpack, config); - console.timeEnd(`Time: ${benchName}`); - } - }, - { - beforeAll() { - this.collectBy = `${test}, scenario '${stringifiedScenario}'`; - } - } - ); - } - }) - ); - }) - ); -} - -await fs.rm(baseOutputPath, { recursive: true, force: true }); - -const FILTER = - typeof process.env.FILTER !== "undefined" - ? new RegExp(process.env.FILTER) - : undefined; - -const NEGATIVE_FILTER = - typeof process.env.NEGATIVE_FILTER !== "undefined" - ? new RegExp(process.env.NEGATIVE_FILTER) - : undefined; - -const casesPath = path.join(__dirname, "benchmarkCases"); -const allBenchmarks = (await fs.readdir(casesPath)) - .filter( - (item) => - !item.includes("_") && - (FILTER ? FILTER.test(item) : true) && - (NEGATIVE_FILTER ? 
!NEGATIVE_FILTER.test(item) : true) - ) - .sort((a, b) => a.localeCompare(b)); - -const benchmarks = allBenchmarks.filter((item) => !item.includes("-long")); -const longBenchmarks = allBenchmarks.filter((item) => item.includes("-long")); -const i = Math.floor(benchmarks.length / longBenchmarks.length); - -for (const [index, value] of longBenchmarks.entries()) { - benchmarks.splice(index * i, 0, value); -} - -const shard = - typeof process.env.SHARD !== "undefined" - ? process.env.SHARD.split("/").map((item) => Number.parseInt(item, 10)) - : [1, 1]; - -if ( - typeof shard[0] === "undefined" || - typeof shard[1] === "undefined" || - shard[0] > shard[1] || - shard[0] <= 0 || - shard[1] <= 0 -) { - throw new Error( - `Invalid \`SHARD\` value - it should be less then a part and more than zero, shard part is ${shard[0]}, count of shards is ${shard[1]}` - ); -} - function splitToNChunks(array, n) { const result = []; - for (let i = n; i > 0; i--) { result.push(array.splice(0, Math.ceil(array.length / i))); } - return result; } -const countOfBenchmarks = benchmarks.length; +class BenchmarkRunner { + constructor() { + /** @type {BenchmarkScenario[]} */ + this.scenarios = [ + { + name: "mode-development", + mode: "development" + }, + { + name: "mode-development-rebuild", + mode: "development", + watch: true + }, + { + name: "mode-production", + mode: "production" + } + ]; + /** @type {string} */ + this.output = path.join(__dirname, "./js"); + /** @type {string} */ + this.baselinesPath = path.join(this.output, "benchmark-baselines"); + /** @type {string} */ + this.baseOutputPath = path.join(this.output, "benchmark"); + /** @type {string} */ + this.casesPath = path.join(__dirname, "benchmarkCases"); + } -if (countOfBenchmarks < shard[1]) { - throw new Error( - `Shard upper limit is more than count of benchmarks, count of benchmarks is ${countOfBenchmarks}, shard is ${shard[1]}` - ); -} + /** + * Initializes benchmark + * @returns {Promise} Baselines + */ + async initialize() { + const baselineRevisions = await getBaselineRevs(); + try { + await fsp.mkdir(this.baselinesPath, { recursive: true }); + } catch (_err) {} // eslint-disable-line no-empty -await Promise.all( - splitToNChunks(benchmarks, shard[1])[shard[0] - 1].map((benchmark) => - registerSuite(bench, benchmark, baselines) - ) -); + /** @type {Baseline[]} */ + const baselines = []; -function formatNumber(value, precision, fractionDigits) { - return Math.abs(value) >= 10 ** precision - ? value.toFixed() - : Math.abs(value) < 10 ** (precision - fractionDigits) - ? value.toFixed(fractionDigits) - : value.toPrecision(precision); -} + for (const baselineInfo of baselineRevisions) { + const baselineRevision = baselineInfo.rev; -const US_PER_MS = 10 ** 3; -const NS_PER_MS = 10 ** 6; + const baselinePath = + baselineRevision === undefined + ? path.resolve(__dirname, "../") + : path.resolve(this.baselinesPath, baselineRevision); -function formatTime(value) { - const toType = - Math.round(value) > 0 - ? "ms" - : Math.round(value * US_PER_MS) / US_PER_MS > 0 - ? 
"µs" - : "ns"; + try { + await fsp.access( + path.resolve(baselinePath, ".git"), + fsp.constants.R_OK + ); + } catch (_err) { + try { + await fsp.mkdir(baselinePath); + } catch (_err) {} // eslint-disable-line no-empty - switch (toType) { - case "ms": { - return `${formatNumber(value, 5, 2)} ms`; + const gitIndex = path.resolve(rootPath, ".git/index"); + const index = await fsp.readFile(gitIndex); + const prevHead = await git.raw(["rev-list", "-n", "1", "HEAD"]); + + await simpleGit(baselinePath).raw([ + "--git-dir", + path.join(rootPath, ".git"), + "reset", + "--hard", + baselineRevision + ]); + + await git.raw(["reset", "--soft", prevHead.split("\n")[0]]); + await fsp.writeFile(gitIndex, index); + } finally { + baselines.push({ + name: baselineInfo.name, + rev: baselineRevision, + path: baselinePath + }); + } } - case "µs": { - return `${formatNumber(value * US_PER_MS, 5, 2)} µs`; + await fsp.rm(this.baseOutputPath, { recursive: true, force: true }); + + return baselines; + } + + async createWorkerPool() { + const cpus = Math.max(1, os.availableParallelism() - 1); + + this.workerPool = new Worker( + path.join(this.casesPath, "_helpers", "/benchmark.worker.mjs"), + { + exposedMethods: ["run"], + numWorkers: cpus, + forkOptions: { silent: false, execArgv: getV8Flags() } + } + ); + } + + /** + * Prepares benchmark tasks + * @param {{BenchmarkTask[]}} benchmarkTasks all benchmark tasks + * @returns {Promise} + */ + async prepareBenchmarkTask(benchmarkTasks) { + for (const task of benchmarkTasks) { + const { benchmark } = task; + const dir = path.join(this.casesPath, benchmark); + const optionsPath = path.resolve(dir, "options.mjs"); + + let options = {}; + if (optionsPath && fs.existsSync(optionsPath)) { + options = await import(`${pathToFileURL(optionsPath)}`); + } + if (typeof options.setup !== "undefined") { + await options.setup(); + } } - case "ns": { - return `${formatNumber(value * NS_PER_MS, 5, 2)} ns`; + } + + /** + * Create benchmark tasks + * @param {string[]} benchmarks all benchmarks + * @param {BenchmarkScenario[]} scenarios all scenarios + * @param {Baseline[]} baselines all baselines + * @returns {BenchmarkTask[]} benchmark tasks + */ + createBenchmarkTasks(benchmarks, scenarios, baselines) { + const benchmarkTasks = []; + + for (let benchIndex = 0; benchIndex < benchmarks.length; benchIndex++) { + for ( + let scenarioIndex = 0; + scenarioIndex < scenarios.length; + scenarioIndex++ + ) { + const benchmark = benchmarks[benchIndex]; + const scenario = scenarios[scenarioIndex]; + + benchmarkTasks.push({ + id: `${benchmark}-${scenario.name}`, + benchmark, + scenario, + baselines + }); + } + } + + return benchmarkTasks; + } + + /** + * Process benchmark results + * @param {BenchmarkResult[]} benchmarkResults benchmark results + */ + processResults(benchmarkResults) { + /** @type {Map} */ + const statsByTests = new Map(); + for (const benchmarkResult of benchmarkResults) { + for (const results of benchmarkResult.results) { + const collectBy = results.collectBy; + const allStats = statsByTests.get(collectBy); + + if (!allStats) { + statsByTests.set(collectBy, [results]); + continue; + } + allStats.push(results); + + const firstStats = allStats[0]; + const secondStats = allStats[1]; + + console.log( + `Result: ${firstStats.text} is ${Math.round( + (secondStats.mean / firstStats.mean) * 100 - 100 + )}% ${secondStats.maxConfidence < firstStats.minConfidence ? "slower than" : secondStats.minConfidence > firstStats.maxConfidence ? 
"faster than" : "the same as"} ${secondStats.text}` + ); + } + } + } + + async run() { + const baselines = await this.initialize(); + await this.createWorkerPool(); + const FILTER = + typeof process.env.FILTER !== "undefined" + ? new RegExp(process.env.FILTER) + : undefined; + + const NEGATIVE_FILTER = + typeof process.env.NEGATIVE_FILTER !== "undefined" + ? new RegExp(process.env.NEGATIVE_FILTER) + : undefined; + + const allBenchmarkCases = (await fsp.readdir(this.casesPath)) + .filter( + (item) => + !item.includes("_") && + (FILTER ? FILTER.test(item) : true) && + (NEGATIVE_FILTER ? !NEGATIVE_FILTER.test(item) : true) + ) + .sort((a, b) => a.localeCompare(b)); + + const benchmarkCases = allBenchmarkCases.filter( + (item) => !item.includes("-long") + ); + const longRunningBenchmarkCases = allBenchmarkCases.filter((item) => + item.includes("-long") + ); + const i = Math.floor( + benchmarkCases.length / longRunningBenchmarkCases.length + ); + + for (const [index, value] of longRunningBenchmarkCases.entries()) { + benchmarkCases.splice(index * i, 0, value); + } + + const shard = + typeof process.env.SHARD !== "undefined" + ? process.env.SHARD.split("/").map((item) => Number.parseInt(item, 10)) + : [1, 1]; + + if ( + typeof shard[0] === "undefined" || + typeof shard[1] === "undefined" || + shard[0] > shard[1] || + shard[0] <= 0 || + shard[1] <= 0 + ) { + throw new Error( + `Invalid \`SHARD\` value - it should be less then a part and more than zero, shard part is ${shard[0]}, count of shards is ${shard[1]}` + ); + } + + const countOfBenchmarks = benchmarkCases.length; + + if (countOfBenchmarks < shard[1]) { + throw new Error( + `Shard upper limit is more than count of benchmarks, count of benchmarks is ${countOfBenchmarks}, shard is ${shard[1]}` + ); + } + + const currentShardBenchmarkCases = splitToNChunks(benchmarkCases, shard[1])[ + shard[0] - 1 + ]; + + const benchmarkTasks = this.createBenchmarkTasks( + currentShardBenchmarkCases, + this.scenarios, + baselines + ); + + await this.prepareBenchmarkTask(benchmarkTasks); + + try { + /** @type {BenchmarkResult[]} */ + const benchmarkResults = await Promise.all( + benchmarkTasks.map((task) => + this.workerPool.run({ + task, + casesPath: this.casesPath, + baseOutputPath: this.baseOutputPath + }) + ) + ); + + this.processResults(benchmarkResults); + } finally { + await this.workerPool.end(); } } } -const statsByTests = new Map(); - -bench.addEventListener("cycle", (event) => { - const task = event.task; - const runs = task.runs; - const nSqrt = Math.sqrt(runs); - const z = tDistribution(runs - 1); - const { latency } = task.result; - const minConfidence = latency.mean - (z * latency.sd) / nSqrt; - const maxConfidence = latency.mean + (z * latency.sd) / nSqrt; - const mean = formatTime(latency.mean); - const deviation = formatTime(latency.sd); - const minConfidenceFormatted = formatTime(minConfidence); - const maxConfidenceFormatted = formatTime(maxConfidence); - const confidence = `${mean} ± ${deviation} [${minConfidenceFormatted}; ${maxConfidenceFormatted}]`; - const text = `${task.name} ${confidence}`; - - const collectBy = task.collectBy; - const allStats = statsByTests.get(collectBy); - - console.log(`Cycle: ${task.name} ${confidence} (${runs} runs sampled)`); - - const info = { ...latency, text, minConfidence, maxConfidence }; - - if (!allStats) { - statsByTests.set(collectBy, [info]); - return; - } - - allStats.push(info); - - const firstStats = allStats[0]; - const secondStats = allStats[1]; - - console.log( - `Result: ${firstStats.text} 
is ${Math.round( - (secondStats.mean / firstStats.mean) * 100 - 100 - )}% ${secondStats.maxConfidence < firstStats.minConfidence ? "slower than" : secondStats.minConfidence > firstStats.maxConfidence ? "faster than" : "the same as"} ${secondStats.text}` - ); -}); - -bench.run(); +new BenchmarkRunner().run(); diff --git a/test/benchmarkCases/_helpers/benchmark.worker.mjs b/test/benchmarkCases/_helpers/benchmark.worker.mjs new file mode 100644 index 000000000..4a9178fa8 --- /dev/null +++ b/test/benchmarkCases/_helpers/benchmark.worker.mjs @@ -0,0 +1,604 @@ +import { writeFile } from "fs"; +import fs from "fs/promises"; +import { Session } from "inspector"; +import path from "path"; +import { fileURLToPath, pathToFileURL } from "url"; +import { Bench, hrtimeNow } from "tinybench"; + +/** @typedef {import("tinybench").Task} TinybenchTask */ +/** @typedef {import("tinybench").TaskResult} TinybenchTaskResult */ +/** @typedef {import("tinybench").Statistics} TinybenchStatistics */ +/** @typedef {import("tinybench").Bench} TinybenchBench */ + +/** + * @typedef {object} TResult + * @property {string} collectBy benchmark name and scenario name + * @property {string} text benchmark result text + * @property {number} minConfidence min confidence + * @property {number} maxConfidence max confidence + */ + +/** + * @typedef {TinybenchStatistics & TResult} Result + */ + +/** + * @typedef {object} BenchmarkResult + * @property {string} benchmark benchmark name + * @property {string} scenario scenario name + * @property {Result[]} results benchmark Result + */ + +const GENERATE_PROFILE = typeof process.env.PROFILE !== "undefined"; + +let _baseOutputPath; + +// Filename sanitization +function sanitizeFilename(filename) { + // Replace invalid filesystem characters with underscores + return filename + .replace(/[<>:"/\\|?*]/g, "_") + .replace(/[\u0000-\u001F\u0080-\u009F]/g, "_") + .replace(/^\.+/, "_") + .replace(/\.+$/, "_") + .replace(/\s+/g, "_") + .slice(0, 200); // Limit filename length +} + +async function withProfiling(name, fn) { + console.log(name); + + // Ensure the profiles directory exists + await fs.mkdir(path.join(_baseOutputPath, "profiles"), { recursive: true }); + + const session = new Session(); + session.connect(); + + // Enable and start profiling + await new Promise((resolve, reject) => { + session.post("Profiler.enable", (err) => { + if (err) return reject(err); + session.post("Profiler.start", (err) => { + if (err) return reject(err); + resolve(); + }); + }); + }); + + // Run the benchmarked function + // No need to `console.time`, it'll be included in the + // CPU Profile. + await fn(); + + // Stop profiling and get the CPU profile + const profile = await new Promise((resolve, reject) => { + session.post("Profiler.stop", (err, { profile }) => { + if (err) return reject(err); + resolve(profile); + }); + }); + + session.disconnect(); + + const outputFile = `${sanitizeFilename(name)}-${Date.now()}.cpuprofile`; + + await fs.writeFile( + path.join(_baseOutputPath, "profiles", outputFile), + JSON.stringify(profile), + "utf8" + ); + + console.log(`CPU profile saved to ${outputFile}`); +} + +function formatNumber(value, precision, fractionDigits) { + return Math.abs(value) >= 10 ** precision + ? value.toFixed() + : Math.abs(value) < 10 ** (precision - fractionDigits) + ? value.toFixed(fractionDigits) + : value.toPrecision(precision); +} + +const US_PER_MS = 10 ** 3; +const NS_PER_MS = 10 ** 6; + +function formatTime(value) { + const toType = + Math.round(value) > 0 + ? 
"ms" + : Math.round(value * US_PER_MS) / US_PER_MS > 0 + ? "µs" + : "ns"; + + switch (toType) { + case "ms": { + return `${formatNumber(value, 5, 2)} ms`; + } + case "µs": { + return `${formatNumber(value * US_PER_MS, 5, 2)} µs`; + } + case "ns": { + return `${formatNumber(value * NS_PER_MS, 5, 2)} ns`; + } + } +} + +const withCodSpeed = async (/** @type {TinybenchBench} */ bench) => { + const { Measurement, getGitDir, mongoMeasurement, setupCore, teardownCore } = + await import("@codspeed/core"); + + if (!Measurement.isInstrumented()) { + const rawRun = bench.run; + bench.run = async () => { + console.warn( + `[CodSpeed] ${bench.tasks.length} benches detected but no instrumentation found, falling back to tinybench` + ); + return await rawRun.bind(bench)(); + }; + return bench; + } + + const getStackTrace = (belowFn) => { + const oldLimit = Error.stackTraceLimit; + Error.stackTraceLimit = Infinity; + const dummyObject = {}; + const v8Handler = Error.prepareStackTrace; + Error.prepareStackTrace = (dummyObject, v8StackTrace) => v8StackTrace; + Error.captureStackTrace(dummyObject, belowFn || getStackTrace); + const v8StackTrace = dummyObject.stack; + Error.prepareStackTrace = v8Handler; + Error.stackTraceLimit = oldLimit; + return v8StackTrace; + }; + + const getCallingFile = () => { + const stack = getStackTrace(); + let callingFile = stack[2].getFileName(); // [here, withCodSpeed, actual caller] + const gitDir = getGitDir(callingFile); + if (gitDir === undefined) { + throw new Error("Could not find a git repository"); + } + if (callingFile.startsWith("file://")) { + callingFile = fileURLToPath(callingFile); + } + return path.relative(gitDir, callingFile); + }; + + const rawAdd = bench.add; + bench.add = (name, fn, opts) => { + // TODO + const _callingFile = getCallingFile(); + const uri = `${"test/BenchmarkTestCases.benchmark.mjs"}::${name}`; + const options = { ...opts, uri }; + return rawAdd.bind(bench)(name, fn, options); + }; + // TODO + const _rootCallingFile = getCallingFile(); + bench.run = async function run() { + const iterations = bench.opts.iterations - 1; + console.log("[CodSpeed] running"); + setupCore(); + for (const task of bench.tasks) { + await bench.opts.setup?.(task, "run"); + await task.fnOpts.beforeAll?.call(task); + const samples = []; + async function iteration() { + try { + await task.fnOpts.beforeEach?.call(task, "run"); + const start = bench.opts.now(); + await task.fn(); + samples.push(bench.opts.now() - start || 0); + await task.fnOpts.afterEach?.call(this, "run"); + } catch (err) { + if (bench.opts.throws) { + throw err; + } + } + } + while (samples.length < iterations) { + await iteration(); + } + // Codspeed Measure + const uri = + task.opts && "uri" in task.options + ? 
task.opts.uri + : `${"test/BenchmarkTestCases.benchmark.mjs"}::${task.name}`; + await task.fnOpts.beforeEach?.call(task); + await mongoMeasurement.start(uri); + await (async function __codspeed_root_frame__() { + Measurement.startInstrumentation(); + await task.fn(); + Measurement.stopInstrumentation(uri); + })(); + await mongoMeasurement.stop(uri); + await task.fnOpts.afterEach?.call(task); + console.log(`[Codspeed] ✔ Measured ${uri}`); + await task.fnOpts.afterAll?.call(task); + + await bench.opts.teardown?.(task, "run"); + task.processRunResult({ latencySamples: samples }); + } + teardownCore(); + console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); + return bench.tasks; + }; + return bench; +}; + +/** + * @param {number} n number of runs + * @returns {number} distribution + */ +function tDistribution(n) { + // two-sided, 90% + // https://en.wikipedia.org/wiki/Student%27s_t-distribution + if (n <= 30) { + // 1 2 ... + const data = [ + 6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.86, 1.833, 1.812, 1.796, + 1.782, 1.771, 1.761, 1.753, 1.746, 1.74, 1.734, 1.729, 1.725, 1.721, + 1.717, 1.714, 1.711, 1.708, 1.706, 1.703, 1.701, 1.699, 1.697 + ]; + return data[n - 1]; + } else if (n <= 120) { + // 30 40 50 60 70 80 90 100 110 120 + const data = [ + 1.697, 1.684, 1.676, 1.671, 1.667, 1.664, 1.662, 1.66, 1.659, 1.658 + ]; + const a = data[Math.floor(n / 10) - 3]; + const b = data[Math.ceil(n / 10) - 3]; + const f = n / 10 - Math.floor(n / 10); + + return a * (1 - f) + b * f; + } + + return 1.645; +} + +function buildWebpackConfig( + test, + baseline, + realConfig, + scenario, + testDirectory, + baseOutputPath +) { + const { watch, ...rest } = scenario; + const config = structuredClone({ ...realConfig, ...rest }); + + config.entry = path.resolve( + testDirectory, + config.entry + ? /\.(c|m)?js$/.test(config.entry) + ? config.entry + : `${config.entry}.js` + : "./index.js" + ); + config.devtool = config.devtool || false; + config.name = `${test}-${baseline.name}-${scenario.name}`; + config.context = testDirectory; + config.performance = false; + config.output = config.output || {}; + config.output.path = path.join( + baseOutputPath, + test, + `scenario-${scenario.name}`, + `baseline-${baseline.name}` + ); + config.plugins = config.plugins || []; + if (config.cache) { + config.cache.cacheDirectory = path.resolve(config.output.path, ".cache"); + } + if (watch) { + config.cache = { + type: "memory", + maxGenerations: 1 + }; + } + return config; +} + +function runBuild(webpack, webpackConfig) { + return new Promise((resolve, reject) => { + const compiler = webpack(webpackConfig); + compiler.run((err, stats) => { + if (err) return reject(err); + if (stats && (stats.hasWarnings() || stats.hasErrors())) { + return reject(new Error(stats.toString())); + } + + compiler.close((closeErr) => { + if (closeErr) return reject(closeErr); + if (stats) stats.toString(); // Force stats computation + resolve(); + }); + }); + }); +} + +async function runWatch(webpack, webpackConfig, callback) { + const compiler = webpack(webpackConfig); + return compiler.watch({}, callback); +} + +async function addBuildBench({ + benchInstance, + benchmarkName, + taskName, + webpack, + webpackConfig, + scenario +}) { + benchInstance.add( + taskName, + async () => { + console.time(`Time: ${taskName}`); + await (GENERATE_PROFILE + ? 
withProfiling(taskName, () => runBuild(webpack, webpackConfig))
+				: runBuild(webpack, webpackConfig));
+			console.timeEnd(`Time: ${taskName}`);
+		},
+		{
+			beforeAll() {
+				this.collectBy = `${benchmarkName}, scenario '${JSON.stringify(scenario)}'`;
+			}
+		}
+	);
+}
+
+async function addWatchBench({
+	benchInstance,
+	benchmarkName,
+	taskName,
+	webpack,
+	webpackConfig,
+	scenario
+}) {
+	const entry = path.resolve(webpackConfig.entry);
+	const originalEntryContent = await fs.readFile(entry, "utf8");
+	let watching;
+	let next;
+
+	const watchCallback = (err, stats) => {
+		if (next) {
+			next(err, stats);
+		}
+	};
+
+	benchInstance.add(
+		taskName,
+		async () => {
+			console.time(`Time: ${taskName}`);
+
+			let resolve;
+			let reject;
+
+			const promise = new Promise((res, rej) => {
+				resolve = res;
+				reject = rej;
+			});
+
+			next = (err, stats) => {
+				if (err) {
+					reject(err);
+					return;
+				}
+
+				if (stats.hasWarnings() || stats.hasErrors()) {
+					reject(new Error(stats.toString()));
+					return;
+				}
+
+				// Construct and print stats to be more accurate with real life projects
+				stats.toString();
+				resolve();
+				console.timeEnd(`Time: ${taskName}`);
+			};
+			await new Promise((resolve, reject) => {
+				writeFile(
+					entry,
+					`${originalEntryContent};console.log('watch test')`,
+					(err) => {
+						if (err) {
+							reject(err);
+							return;
+						}
+
+						resolve();
+					}
+				);
+			});
+			await promise;
+		},
+		{
+			async beforeAll() {
+				this.collectBy = `${benchmarkName}, scenario '${JSON.stringify(scenario)}'`;
+
+				let resolve;
+				let reject;
+
+				const promise = new Promise((res, rej) => {
+					resolve = res;
+					reject = rej;
+				});
+
+				next = (err, stats) => {
+					if (err) {
+						reject(err);
+						return;
+					}
+
+					if (stats.hasWarnings() || stats.hasErrors()) {
+						reject(new Error(stats.toString()));
+						return;
+					}
+
+					// Construct and print stats to be more accurate with real life projects
+					stats.toString();
+					resolve();
+				};
+
+				if (GENERATE_PROFILE) {
+					await withProfiling(
+						taskName,
+						async () =>
+							(watching = await runWatch(webpack, webpackConfig, watchCallback))
+					);
+				} else {
+					watching = await runWatch(webpack, webpackConfig, watchCallback);
+				}
+
+				// Make an extra fs call to warm up filesystem caches
+				// Also wait for the first run callback
+				await new Promise((resolve, reject) => {
+					writeFile(
+						entry,
+						`${originalEntryContent};console.log('watch test')`,
+						(err) => {
+							if (err) {
+								reject(err);
+								return;
+							}
+							resolve();
+						}
+					);
+				});
+
+				await promise;
+			},
+			async afterAll() {
+				// Close watching
+				await new Promise((resolve, reject) => {
+					if (watching) {
+						watching.close((closeErr) => {
+							if (closeErr) {
+								reject(closeErr);
+								return;
+							}
+							resolve();
+						});
+					}
+				});
+
+				// Write original content
+				await new Promise((resolve, reject) => {
+					writeFile(entry, originalEntryContent, (err) => {
+						if (err) {
+							reject(err);
+							return;
+						}
+
+						resolve();
+					});
+				});
+			}
+		}
+	);
+}
+
+export async function run({ task, casesPath, baseOutputPath }) {
+	console.log(`Worker ${process.pid}: Running ${task.id}`);
+
+	_baseOutputPath = baseOutputPath;
+
+	const { benchmark, scenario, baselines } = task;
+	const testDirectory = path.join(casesPath, benchmark);
+
+	const benchInstance = await withCodSpeed(
+		new Bench({
+			now: hrtimeNow,
+			throws: true,
+			warmup: true,
+			warmupIterations: 2,
+			iterations: 8,
+			setup(task, mode) {
+				global.gc();
+				console.log(`Setup (${mode} mode): ${task.name}`);
+			},
+			teardown(task, mode) {
+				console.log(`Teardown (${mode} mode): ${task.name}`);
+			}
+		})
+	);
+
+	/** @type {Result[]} */
+	const benchResults = 
[]; + + const realConfig = ( + await import( + `${pathToFileURL(path.join(testDirectory, "webpack.config.js"))}` + ) + ).default; + + for (const baseline of baselines) { + const webpackConfig = buildWebpackConfig( + benchmark, + baseline, + realConfig, + scenario, + testDirectory, + baseOutputPath + ); + + const LAST_COMMIT = typeof process.env.LAST_COMMIT !== "undefined"; + + const stringifiedScenario = JSON.stringify(scenario); + const taskName = `benchmark "${benchmark}", scenario '${stringifiedScenario}'${LAST_COMMIT ? "" : ` ${baseline.name} (${baseline.rev})`}`; + const fullTaskName = `benchmark "${benchmark}", scenario '${stringifiedScenario}' ${baseline.name} ${baseline.rev ? `(${baseline.rev})` : ""}`; + + console.log(`Register: ${fullTaskName}`); + + const webpack = ( + await import(pathToFileURL(path.resolve(baseline.path, "./lib/index.js"))) + ).default; + + const params = { + benchInstance, + taskName, + benchmarkName: benchmark, + webpack, + webpackConfig, + scenario + }; + + await (scenario.watch ? addWatchBench(params) : addBuildBench(params)); + } + + benchInstance.addEventListener("cycle", (event) => { + /** @type {TinybenchTask} */ + const task = event.task; + const runs = task.runs; + const nSqrt = Math.sqrt(runs); + const z = tDistribution(runs - 1); + + if (!task.result) return; + + const { latency } = task.result; + const minConfidence = latency.mean - (z * latency.sd) / nSqrt; + const maxConfidence = latency.mean + (z * latency.sd) / nSqrt; + const mean = formatTime(latency.mean); + const deviation = formatTime(latency.sd); + const minConfidenceFormatted = formatTime(minConfidence); + const maxConfidenceFormatted = formatTime(maxConfidence); + const confidence = `${mean} ± ${deviation} [${minConfidenceFormatted}; ${maxConfidenceFormatted}]`; + const text = `${task.name} ${confidence}`; + + console.log(`Cycle: ${task.name} ${confidence} (${runs} runs sampled)`); + + benchResults.push({ + ...latency, + collectBy: task.collectBy, + text, + minConfidence, + maxConfidence + }); + }); + + await benchInstance.run(); + + return { + benchmark, + scenario: scenario.name, + results: benchResults + }; +} diff --git a/yarn.lock b/yarn.lock index 48d960a57..9142ecfaa 100644 --- a/yarn.lock +++ b/yarn.lock @@ -5108,7 +5108,7 @@ jest-watcher@30.0.5: jest-util "30.0.5" string-length "^4.0.2" -jest-worker@30.0.5: +jest-worker@30.0.5, jest-worker@^30.0.5: version "30.0.5" resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-30.0.5.tgz#0b85cbab10610303e8d84e214f94d8f052c3cd04" integrity sha512-ojRXsWzEP16NdUuBw/4H/zkZdHOa7MMYCk4E430l+8fELeLg/mqmMlRhjL7UNZvQrDmnovWZV4DxX03fZF48fQ==