Update checked-in dependencies

This commit is contained in:
github-actions[bot] 2023-07-13 09:09:17 +00:00
parent 4fad06f438
commit 40a500c743
4168 changed files with 298222 additions and 374905 deletions

View file

@@ -12,8 +12,6 @@
// Requirements
//------------------------------------------------------------------------------
const { OrderedMap } = require("js-sdsl");
const astUtils = require("./utils/ast-utils");
//------------------------------------------------------------------------------
@@ -125,43 +123,48 @@ const KNOWN_NODES = new Set([
/**
* A mutable balanced binary search tree that stores (key, value) pairs. The keys are numeric, and must be unique.
* This is intended to be a generic wrapper around a balanced binary search tree library, so that the underlying implementation
* A mutable map that stores (key, value) pairs. The keys are numeric indices, and must be unique.
* This is intended to be a generic wrapper around a map with non-negative integer keys, so that the underlying implementation
* can easily be swapped out.
*/
class BinarySearchTree {
class IndexMap {
/**
* Creates an empty tree
* Creates an empty map
* @param {number} maxKey The maximum key
*/
constructor() {
this._orderedMap = new OrderedMap();
this._orderedMapEnd = this._orderedMap.end();
constructor(maxKey) {
// Initializing the array with the maximum expected size avoids dynamic reallocations that could degrade performance.
this._values = Array(maxKey + 1);
}
/**
* Inserts an entry into the tree.
* Inserts an entry into the map.
* @param {number} key The entry's key
* @param {any} value The entry's value
* @returns {void}
*/
insert(key, value) {
this._orderedMap.setElement(key, value);
this._values[key] = value;
}
/**
* Finds the entry with the largest key less than or equal to the provided key
* Finds the value of the entry with the largest key less than or equal to the provided key
* @param {number} key The provided key
* @returns {{key: number, value: *}|null} The found entry, or null if no such entry exists.
* @returns {*|undefined} The value of the found entry, or undefined if no such entry exists.
*/
findLe(key) {
const iterator = this._orderedMap.reverseLowerBound(key);
findLastNotAfter(key) {
const values = this._values;
if (iterator.equals(this._orderedMapEnd)) {
return {};
for (let index = key; index >= 0; index--) {
const value = values[index];
if (value) {
return value;
}
}
return { key: iterator.pointer[0], value: iterator.pointer[1] };
return void 0;
}
/**
@@ -171,26 +174,7 @@ class BinarySearchTree {
* @returns {void}
*/
deleteRange(start, end) {
// Exit without traversing the tree if the range has zero size.
if (start === end) {
return;
}
const iterator = this._orderedMap.lowerBound(start);
if (iterator.equals(this._orderedMapEnd)) {
return;
}
if (end > this._orderedMap.back()[0]) {
while (!iterator.equals(this._orderedMapEnd)) {
this._orderedMap.eraseElementByIterator(iterator);
}
} else {
while (iterator.pointer[0] < end) {
this._orderedMap.eraseElementByIterator(iterator);
}
}
this._values.fill(void 0, start, end);
}
}
@@ -204,15 +188,19 @@ class TokenInfo {
*/
constructor(sourceCode) {
this.sourceCode = sourceCode;
this.firstTokensByLineNumber = sourceCode.tokensAndComments.reduce((map, token) => {
if (!map.has(token.loc.start.line)) {
map.set(token.loc.start.line, token);
this.firstTokensByLineNumber = new Map();
const tokens = sourceCode.tokensAndComments;
for (let i = 0; i < tokens.length; i++) {
const token = tokens[i];
if (!this.firstTokensByLineNumber.has(token.loc.start.line)) {
this.firstTokensByLineNumber.set(token.loc.start.line, token);
}
if (!map.has(token.loc.end.line) && sourceCode.text.slice(token.range[1] - token.loc.end.column, token.range[1]).trim()) {
map.set(token.loc.end.line, token);
if (!this.firstTokensByLineNumber.has(token.loc.end.line) && sourceCode.text.slice(token.range[1] - token.loc.end.column, token.range[1]).trim()) {
this.firstTokensByLineNumber.set(token.loc.end.line, token);
}
return map;
}, new Map());
}
}
/**
@@ -252,14 +240,15 @@ class OffsetStorage {
* @param {TokenInfo} tokenInfo a TokenInfo instance
* @param {number} indentSize The desired size of each indentation level
* @param {string} indentType The indentation character
* @param {number} maxIndex The maximum end index of any token
*/
constructor(tokenInfo, indentSize, indentType) {
constructor(tokenInfo, indentSize, indentType, maxIndex) {
this._tokenInfo = tokenInfo;
this._indentSize = indentSize;
this._indentType = indentType;
this._tree = new BinarySearchTree();
this._tree.insert(0, { offset: 0, from: null, force: false });
this._indexMap = new IndexMap(maxIndex);
this._indexMap.insert(0, { offset: 0, from: null, force: false });
this._lockedFirstTokens = new WeakMap();
this._desiredIndentCache = new WeakMap();
@@ -267,7 +256,7 @@ class OffsetStorage {
}
_getOffsetDescriptor(token) {
return this._tree.findLe(token.range[0]).value;
return this._indexMap.findLastNotAfter(token.range[0]);
}
/**
@@ -388,37 +377,36 @@ class OffsetStorage {
* * key: 820, value: { offset: 1, from: bazToken }
*
* To find the offset descriptor for any given token, one needs to find the node with the largest key
* which is <= token.start. To make this operation fast, the nodes are stored in a balanced binary
* search tree indexed by key.
* which is <= token.start. To make this operation fast, the nodes are stored in a map indexed by key.
*/
const descriptorToInsert = { offset, from: fromToken, force };
const descriptorAfterRange = this._tree.findLe(range[1]).value;
const descriptorAfterRange = this._indexMap.findLastNotAfter(range[1]);
const fromTokenIsInRange = fromToken && fromToken.range[0] >= range[0] && fromToken.range[1] <= range[1];
const fromTokenDescriptor = fromTokenIsInRange && this._getOffsetDescriptor(fromToken);
// First, remove any existing nodes in the range from the tree.
this._tree.deleteRange(range[0] + 1, range[1]);
// First, remove any existing nodes in the range from the map.
this._indexMap.deleteRange(range[0] + 1, range[1]);
// Insert a new node into the tree for this range
this._tree.insert(range[0], descriptorToInsert);
// Insert a new node into the map for this range
this._indexMap.insert(range[0], descriptorToInsert);
/*
* To avoid circular offset dependencies, keep the `fromToken` token mapped to whatever it was mapped to previously,
* even if it's in the current range.
*/
if (fromTokenIsInRange) {
this._tree.insert(fromToken.range[0], fromTokenDescriptor);
this._tree.insert(fromToken.range[1], descriptorToInsert);
this._indexMap.insert(fromToken.range[0], fromTokenDescriptor);
this._indexMap.insert(fromToken.range[1], descriptorToInsert);
}
/*
* To avoid modifying the offset of tokens after the range, insert another node to keep the offset of the following
* tokens the same as it was before.
*/
this._tree.insert(range[1], descriptorAfterRange);
this._indexMap.insert(range[1], descriptorAfterRange);
}
/**
@@ -510,7 +498,7 @@ module.exports = {
docs: {
description: "Enforce consistent indentation",
recommended: false,
url: "https://eslint.org/docs/rules/indent"
url: "https://eslint.org/docs/latest/rules/indent"
},
fixable: "whitespace",
@@ -703,9 +691,9 @@ module.exports = {
}
}
const sourceCode = context.getSourceCode();
const sourceCode = context.sourceCode;
const tokenInfo = new TokenInfo(sourceCode);
const offsets = new OffsetStorage(tokenInfo, indentSize, indentType === "space" ? " " : "\t");
const offsets = new OffsetStorage(tokenInfo, indentSize, indentType === "space" ? " " : "\t", sourceCode.text.length);
const parameterParens = new WeakSet();
/**
@@ -980,19 +968,19 @@ module.exports = {
const parenStack = [];
const parenPairs = [];
tokens.forEach(nextToken => {
for (let i = 0; i < tokens.length; i++) {
const nextToken = tokens[i];
// Accumulate a list of parenthesis pairs
if (astUtils.isOpeningParenToken(nextToken)) {
parenStack.push(nextToken);
} else if (astUtils.isClosingParenToken(nextToken)) {
parenPairs.unshift({ left: parenStack.pop(), right: nextToken });
parenPairs.push({ left: parenStack.pop(), right: nextToken });
}
});
}
parenPairs.forEach(pair => {
const leftParen = pair.left;
const rightParen = pair.right;
for (let i = parenPairs.length - 1; i >= 0; i--) {
const leftParen = parenPairs[i].left;
const rightParen = parenPairs[i].right;
// We only want to handle parens around expressions, so exclude parentheses that are in function parameters and function call arguments.
if (!parameterParens.has(leftParen) && !parameterParens.has(rightParen)) {
@@ -1006,7 +994,7 @@ module.exports = {
}
offsets.setDesiredOffset(rightParen, leftParen, 0);
});
}
}
/**
@@ -1727,9 +1715,13 @@ module.exports = {
}
// Invoke the queued offset listeners for the nodes that aren't ignored.
listenerCallQueue
.filter(nodeInfo => !ignoredNodes.has(nodeInfo.node))
.forEach(nodeInfo => nodeInfo.listener(nodeInfo.node));
for (let i = 0; i < listenerCallQueue.length; i++) {
const nodeInfo = listenerCallQueue[i];
if (!ignoredNodes.has(nodeInfo.node)) {
nodeInfo.listener(nodeInfo.node);
}
}
// Update the offsets for ignored nodes to prevent their child tokens from being reported.
ignoredNodes.forEach(ignoreNode);
@@ -1740,27 +1732,31 @@ module.exports = {
* Create a Map from (tokenOrComment) => (precedingToken).
* This is necessary because sourceCode.getTokenBefore does not handle a comment as an argument correctly.
*/
const precedingTokens = sourceCode.ast.comments.reduce((commentMap, comment) => {
const precedingTokens = new WeakMap();
for (let i = 0; i < sourceCode.ast.comments.length; i++) {
const comment = sourceCode.ast.comments[i];
const tokenOrCommentBefore = sourceCode.getTokenBefore(comment, { includeComments: true });
const hasToken = precedingTokens.has(tokenOrCommentBefore) ? precedingTokens.get(tokenOrCommentBefore) : tokenOrCommentBefore;
return commentMap.set(comment, commentMap.has(tokenOrCommentBefore) ? commentMap.get(tokenOrCommentBefore) : tokenOrCommentBefore);
}, new WeakMap());
precedingTokens.set(comment, hasToken);
}
sourceCode.lines.forEach((line, lineIndex) => {
const lineNumber = lineIndex + 1;
for (let i = 1; i < sourceCode.lines.length + 1; i++) {
if (!tokenInfo.firstTokensByLineNumber.has(lineNumber)) {
if (!tokenInfo.firstTokensByLineNumber.has(i)) {
// Don't check indentation on blank lines
return;
continue;
}
const firstTokenOfLine = tokenInfo.firstTokensByLineNumber.get(lineNumber);
const firstTokenOfLine = tokenInfo.firstTokensByLineNumber.get(i);
if (firstTokenOfLine.loc.start.line !== lineNumber) {
if (firstTokenOfLine.loc.start.line !== i) {
// Don't check the indentation of multi-line tokens (e.g. template literals or block comments) twice.
return;
continue;
}
if (astUtils.isCommentToken(firstTokenOfLine)) {
@@ -1785,18 +1781,18 @@ module.exports = {
mayAlignWithBefore && validateTokenIndent(firstTokenOfLine, offsets.getDesiredIndent(tokenBefore)) ||
mayAlignWithAfter && validateTokenIndent(firstTokenOfLine, offsets.getDesiredIndent(tokenAfter))
) {
return;
continue;
}
}
// If the token matches the expected indentation, don't report it.
if (validateTokenIndent(firstTokenOfLine, offsets.getDesiredIndent(firstTokenOfLine))) {
return;
continue;
}
// Otherwise, report the token/comment.
report(firstTokenOfLine, offsets.getDesiredIndent(firstTokenOfLine));
});
}
}
}
);