perf(buildChunkGraph): avoid unneeded re-visit

This commit is contained in:
Alexander Akait 2024-09-03 14:30:22 +03:00 committed by GitHub
commit 456c70432a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 77 additions and 62 deletions

View File

@ -38,6 +38,7 @@ const { getEntryRuntime, mergeRuntime } = require("./util/runtime");
* @typedef {object} ChunkGroupInfo
* @property {ChunkGroup} chunkGroup the chunk group
* @property {RuntimeSpec} runtime the runtimes
* @property {boolean} init is this chunk group initialized
* @property {bigint | undefined} minAvailableModules current minimal set of modules available at this point
* @property {bigint[]} availableModulesToBeMerged enqueued updates to the minimal set of available modules
* @property {Set<Module>=} skippedItems modules that were skipped because module is already available in parent chunks (need to reconsider when minAvailableModules is shrinking)
@ -345,8 +346,8 @@ const visitModules = (
/** @type {Map<DependenciesBlock, ChunkGroupInfo>} */
const blockChunkGroups = new Map();
/** @type {Map<ChunkGroupInfo, DependenciesBlock>} */
const blockByChunkGroups = new Map();
/** @type {Map<ChunkGroupInfo, Set<DependenciesBlock>>} */
const blocksByChunkGroups = new Map();
/** @type {Map<string, ChunkGroupInfo>} */
const namedChunkGroups = new Map();
@ -367,7 +368,7 @@ const visitModules = (
/** @type {QueueItem[]} */
let queue = [];
/** @type {Map<ChunkGroupInfo, Set<ChunkGroupInfo>>} */
/** @type {Map<ChunkGroupInfo, Set<[ChunkGroupInfo, ChunkGroup, Module | null]>>} */
const queueConnect = new Map();
/** @type {Set<ChunkGroupInfo>} */
const chunkGroupsForCombining = new Set();
@ -382,6 +383,7 @@ const visitModules = (
);
/** @type {ChunkGroupInfo} */
const chunkGroupInfo = {
init: false,
chunkGroup,
runtime,
minAvailableModules: undefined,
@ -452,7 +454,7 @@ const visitModules = (
/** @type {Set<ChunkGroupInfo>} */
const outdatedChunkGroupInfo = new Set();
/** @type {Set<ChunkGroupInfo>} */
/** @type {Set<[ChunkGroupInfo, ChunkGroup, Module | null]>} */
const chunkGroupsForMerging = new Set();
/** @type {QueueItem[]} */
let queueDelayed = [];
@ -505,6 +507,7 @@ const visitModules = (
entrypoint.index = nextChunkGroupIndex++;
cgi = {
chunkGroup: entrypoint,
init: false,
runtime: entrypoint.options.runtime || entrypoint.name,
minAvailableModules: ZERO_BIGINT,
availableModulesToBeMerged: [],
@ -572,6 +575,7 @@ const visitModules = (
maskByChunk.set(c.chunks[0], ZERO_BIGINT);
c.index = nextChunkGroupIndex++;
cgi = {
init: false,
chunkGroup: c,
runtime: chunkGroupInfo.runtime,
minAvailableModules: undefined,
@ -614,7 +618,14 @@ const visitModules = (
blockConnections.set(b, []);
}
blockChunkGroups.set(b, /** @type {ChunkGroupInfo} */ (cgi));
blockByChunkGroups.set(/** @type {ChunkGroupInfo} */ (cgi), b);
let blocks = blocksByChunkGroups.get(/** @type {ChunkGroupInfo} */ (cgi));
if (!blocks) {
blocksByChunkGroups.set(
/** @type {ChunkGroupInfo} */ (cgi),
(blocks = new Set())
);
}
blocks.add(b);
} else if (entryOptions) {
entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
} else {
@ -636,19 +647,9 @@ const visitModules = (
connectList = new Set();
queueConnect.set(chunkGroupInfo, connectList);
}
connectList.add(/** @type {ChunkGroupInfo} */ (cgi));
// TODO check if this really need to be done for each traversal
// or if it is enough when it's queued when created
// 4. We enqueue the DependenciesBlock for traversal
queueDelayed.push({
action: PROCESS_BLOCK,
block: b,
module,
chunk: c.chunks[0],
chunkGroup: c,
chunkGroupInfo: /** @type {ChunkGroupInfo} */ (cgi)
});
connectList.add(
/** @type {[ChunkGroupInfo, ChunkGroup, Module]} */ ([cgi, c, module])
);
} else if (entrypoint !== undefined) {
chunkGroupInfo.chunkGroup.addAsyncEntrypoint(entrypoint);
}
@ -901,11 +902,10 @@ const visitModules = (
for (const [chunkGroupInfo, targets] of queueConnect) {
// 1. Add new targets to the list of children
if (chunkGroupInfo.children === undefined) {
chunkGroupInfo.children = targets;
} else {
for (const target of targets) {
chunkGroupInfo.children.add(target);
}
chunkGroupInfo.children = new Set();
}
for (const [target] of targets) {
chunkGroupInfo.children.add(target);
}
// 2. Calculate resulting available modules
@ -915,9 +915,9 @@ const visitModules = (
const runtime = chunkGroupInfo.runtime;
// 3. Update chunk group info
for (const target of targets) {
for (const [target, chunkGroup, module] of targets) {
target.availableModulesToBeMerged.push(resultingAvailableModules);
chunkGroupsForMerging.add(target);
chunkGroupsForMerging.add([target, chunkGroup, module]);
const oldRuntime = target.runtime;
const newRuntime = mergeRuntime(oldRuntime, runtime);
if (oldRuntime !== newRuntime) {
@ -935,7 +935,7 @@ const visitModules = (
statProcessedChunkGroupsForMerging += chunkGroupsForMerging.size;
// Execute the merge
for (const info of chunkGroupsForMerging) {
for (const [info, chunkGroup, module] of chunkGroupsForMerging) {
const availableModulesToBeMerged = info.availableModulesToBeMerged;
const cachedMinAvailableModules = info.minAvailableModules;
let minAvailableModules = cachedMinAvailableModules;
@ -958,6 +958,20 @@ const visitModules = (
info.resultingAvailableModules = undefined;
outdatedChunkGroupInfo.add(info);
}
if ((!info.init || changed) && module) {
info.init = true;
for (const b of blocksByChunkGroups.get(info)) {
queueDelayed.push({
action: PROCESS_BLOCK,
block: b,
module,
chunk: chunkGroup.chunks[0],
chunkGroup,
chunkGroupInfo: info
});
}
}
}
chunkGroupsForMerging.clear();
};
@ -1057,7 +1071,7 @@ const visitModules = (
connectList = new Set();
queueConnect.set(info, connectList);
}
connectList.add(cgi);
connectList.add([cgi, cgi.chunkGroup, module]);
}
}
@ -1117,48 +1131,44 @@ const visitModules = (
for (const info of outdatedOrderIndexChunkGroups) {
const { chunkGroup, runtime } = info;
const block = blockByChunkGroups.get(info);
const blocks = blocksByChunkGroups.get(info);
if (!block) {
if (!blocks) {
continue;
}
let preOrderIndex = 0;
let postOrderIndex = 0;
for (const block of blocks) {
let preOrderIndex = 0;
let postOrderIndex = 0;
/**
* @param {DependenciesBlock} current current
* @param {BlocksWithNestedBlocks} visited visited dependencies blocks
*/
const process = (current, visited) => {
const blockModules = getBlockModules(current, runtime);
for (let i = 0, len = blockModules.length; i < len; i += 3) {
const activeState = /** @type {ConnectionState} */ (
blockModules[i + 1]
);
if (activeState === false) {
continue;
}
const refModule = /** @type {Module} */ (blockModules[i]);
if (visited.has(refModule)) {
continue;
}
/**
* @param {DependenciesBlock} current current
* @param {BlocksWithNestedBlocks} visited visited dependencies blocks
*/
const process = (current, visited) => {
const blockModules = getBlockModules(current, runtime);
if (blockModules === undefined) {
return;
}
visited.add(refModule);
for (let i = 0, len = blockModules.length; i < len; i += 3) {
const activeState = /** @type {ConnectionState} */ (
blockModules[i + 1]
);
if (activeState === false) {
continue;
if (refModule) {
chunkGroup.setModulePreOrderIndex(refModule, preOrderIndex++);
process(refModule, visited);
chunkGroup.setModulePostOrderIndex(refModule, postOrderIndex++);
}
}
const refModule = /** @type {Module} */ (blockModules[i]);
if (visited.has(refModule)) {
continue;
}
visited.add(refModule);
if (refModule) {
chunkGroup.setModulePreOrderIndex(refModule, preOrderIndex++);
process(refModule, visited);
chunkGroup.setModulePostOrderIndex(refModule, postOrderIndex++);
}
}
};
process(block, new Set());
};
process(block, new Set());
}
}
outdatedOrderIndexChunkGroups.clear();
ordinalByModule.clear();

View File

@ -0,0 +1,5 @@
// Webpack test-fixture configuration added by this commit.
// Sets optimization.moduleIds to "named" — presumably so module ids are
// path-derived and the resulting chunk-graph output is stable/readable
// across builds; confirm against the webpack optimization.moduleIds docs.
module.exports = {
	optimization: {
		moduleIds: "named"
	}
};