From d5828549525ff51c58c7063a333ab55dc4777823 Mon Sep 17 00:00:00 2001
From: Ivan Goncharov
Date: Mon, 8 Aug 2022 20:02:16 +0300
Subject: [PATCH] parser: limit maximum number of tokens

Backport of #3684

Motivation:
Parser CPU and memory usage is linear in the number of tokens in a document;
however, in extreme cases it becomes quadratic due to memory exhaustion.
On my machine it happens on queries with 2k tokens.

For example (with the field repeated until the document reaches 2k tokens):

```
{ a a a }
```

It takes 741ms on my machine. But if we create a document of the same size
with a smaller number of tokens, it is a lot faster. Example:

```
{ a(arg: "a a") }
```

Now it takes only 17ms to process, which is 43 times faster.

That means that if we limit document size, we would have to make the limit
very small, since it takes only two bytes to create a token, e.g. ` a`.
But that would hurt legitimate documents that contain long tokens
(comments, descriptions, strings, long names, etc.).

That's why this PR adds a mechanism to limit the number of tokens in a
parsed document. The exact same mechanism is implemented in graphql-java,
see: https://github.com/graphql-java/graphql-java/pull/2549

I also tried an alternative approach of counting AST nodes, and it gives a
slightly better approximation of how many resources would be consumed.
However, compared to tokens, AST nodes are an implementation detail of
graphql-js, so the limit would be impossible to replicate in other
implementations (e.g. to count this number on a client).

* Apply suggestions from code review

Co-authored-by: Yaacov Rydzinski
Co-authored-by: Yaacov Rydzinski
---
 src/language/__tests__/parser-test.ts | 13 ++++++++
 src/language/parser.ts                | 43 ++++++++++++++++++++++-----
 2 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/src/language/__tests__/parser-test.ts b/src/language/__tests__/parser-test.ts
index 3571b75700..87e7b92c34 100644
--- a/src/language/__tests__/parser-test.ts
+++ b/src/language/__tests__/parser-test.ts
@@ -84,6 +84,19 @@ describe('Parser', () => {
     `);
   });
 
+  it('limit maximum number of tokens', () => {
+    expect(() => parse('{ foo }', { maxTokens: 3 })).to.not.throw();
+    expect(() => parse('{ foo }', { maxTokens: 2 })).to.throw(
+      'Syntax Error: Document contains more than 2 tokens. Parsing aborted.',
+    );
+
+    expect(() => parse('{ foo(bar: "baz") }', { maxTokens: 8 })).to.not.throw();
+
+    expect(() => parse('{ foo(bar: "baz") }', { maxTokens: 7 })).to.throw(
+      'Syntax Error: Document contains more than 7 tokens. Parsing aborted.',
+    );
+  });
+
   it('parses variable inline values', () => {
     expect(() =>
       parse('{ field(complex: { a: { b: [ $var ] } }) }'),
diff --git a/src/language/parser.ts b/src/language/parser.ts
index ef19af7417..eb54a0376b 100644
--- a/src/language/parser.ts
+++ b/src/language/parser.ts
@@ -78,6 +78,15 @@ export interface ParseOptions {
    */
   noLocation?: boolean;
 
+  /**
+   * Parser CPU and memory usage is linear in the number of tokens in a document;
+   * however, in extreme cases it becomes quadratic due to memory exhaustion.
+   * Parsing happens before validation, so even invalid queries can burn lots of
+   * CPU time and memory.
+   * To prevent this you can set a maximum number of tokens allowed within a document.
+   */
+  maxTokens?: number | undefined;
+
   /**
    * @deprecated will be removed in the v17.0.0
    *
@@ -179,12 +188,14 @@
 export class Parser {
   protected _options: ParseOptions;
   protected _lexer: Lexer;
+  protected _tokenCounter: number;
 
   constructor(source: string | Source, options: ParseOptions = {}) {
     const sourceObj = isSource(source) ? source : new Source(source);
 
     this._lexer = new Lexer(sourceObj);
     this._options = options;
+    this._tokenCounter = 0;
   }
 
   /**
@@ -569,13 +580,13 @@ export class Parser {
       case TokenKind.BRACE_L:
         return this.parseObject(isConst);
       case TokenKind.INT:
-        this._lexer.advance();
+        this.advanceLexer();
         return this.node(token, {
           kind: Kind.INT,
           value: token.value,
         });
       case TokenKind.FLOAT:
-        this._lexer.advance();
+        this.advanceLexer();
         return this.node(token, {
           kind: Kind.FLOAT,
           value: token.value,
@@ -584,7 +595,7 @@
       case TokenKind.BLOCK_STRING:
         return this.parseStringLiteral();
       case TokenKind.NAME:
-        this._lexer.advance();
+        this.advanceLexer();
         switch (token.value) {
           case 'true':
             return this.node(token, {
@@ -630,7 +641,7 @@
 
   parseStringLiteral(): StringValueNode {
     const token = this._lexer.token;
-    this._lexer.advance();
+    this.advanceLexer();
     return this.node(token, {
       kind: Kind.STRING,
       value: token.value,
@@ -1411,7 +1422,7 @@
   expectToken(kind: TokenKind): Token {
     const token = this._lexer.token;
     if (token.kind === kind) {
-      this._lexer.advance();
+      this.advanceLexer();
       return token;
     }
 
@@ -1429,7 +1440,7 @@
   expectOptionalToken(kind: TokenKind): boolean {
     const token = this._lexer.token;
     if (token.kind === kind) {
-      this._lexer.advance();
+      this.advanceLexer();
       return true;
     }
     return false;
@@ -1442,7 +1453,7 @@
   expectKeyword(value: string): void {
     const token = this._lexer.token;
     if (token.kind === TokenKind.NAME && token.value === value) {
-      this._lexer.advance();
+      this.advanceLexer();
     } else {
       throw syntaxError(
         this._lexer.source,
@@ -1459,7 +1470,7 @@
   expectOptionalKeyword(value: string): boolean {
     const token = this._lexer.token;
     if (token.kind === TokenKind.NAME && token.value === value) {
-      this._lexer.advance();
+      this.advanceLexer();
       return true;
     }
     return false;
@@ -1548,6 +1559,22 @@
     } while (this.expectOptionalToken(delimiterKind));
     return nodes;
   }
+
+  advanceLexer(): void {
+    const { maxTokens } = this._options;
+    const token = this._lexer.advance();
+
+    if (maxTokens !== undefined && token.kind !== TokenKind.EOF) {
+      ++this._tokenCounter;
+      if (this._tokenCounter > maxTokens) {
+        throw syntaxError(
+          this._lexer.source,
+          token.start,
+          `Document contains more than ${maxTokens} tokens. Parsing aborted.`,
+        );
+      }
+    }
+  }
 }
 
 /**
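
A minimal usage sketch of the option this patch adds (not part of the patch itself): the `parseWithLimit` helper and the 1000-token limit are illustrative assumptions, not values suggested by the patch; only `parse`, the `maxTokens` option, and `GraphQLError` come from graphql-js.

```ts
import { parse, GraphQLError } from 'graphql';
import type { DocumentNode } from 'graphql';

// Hypothetical helper: reject pathologically token-heavy documents at parse
// time, before validation or execution can spend CPU and memory on them.
function parseWithLimit(body: string): DocumentNode {
  try {
    // maxTokens is the option introduced by this patch.
    return parse(body, { maxTokens: 1000 });
  } catch (error) {
    if (error instanceof GraphQLError) {
      // With this patch the message reads, e.g.:
      // "Syntax Error: Document contains more than 1000 tokens. Parsing aborted."
      console.error(error.message);
    }
    throw error;
  }
}
```

Because the limit counts lexer tokens rather than bytes or AST nodes, a client can compute the same number independently, which is the portability argument made in the commit message.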