diff --git a/graphql.go b/graphql.go index 06ffd459..a4f9b73d 100644 --- a/graphql.go +++ b/graphql.go @@ -34,7 +34,7 @@ func ParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) ( opt(s) } - if err := s.schema.Parse(schemaString); err != nil { + if err := s.schema.Parse(schemaString, s.useStringDescriptions); err != nil { return nil, err } @@ -63,16 +63,27 @@ type Schema struct { schema *schema.Schema res *resolvable.Schema - maxDepth int - maxParallelism int - tracer trace.Tracer - validationTracer trace.ValidationTracer - logger log.Logger + maxDepth int + maxParallelism int + tracer trace.Tracer + validationTracer trace.ValidationTracer + logger log.Logger + useStringDescriptions bool } // SchemaOpt is an option to pass to ParseSchema or MustParseSchema. type SchemaOpt func(*Schema) +// UseStringDescriptions enables the usage of double quoted and triple quoted +// strings as descriptions as per the June 2018 spec +// https://facebook.github.io/graphql/June2018/. When this is not enabled, +// comments are parsed as descriptions instead. +func UseStringDescriptions() SchemaOpt { + return func(s *Schema) { + s.useStringDescriptions = true + } +} + // MaxDepth specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking. 
func MaxDepth(n int) SchemaOpt { return func(s *Schema) { diff --git a/internal/common/lexer.go b/internal/common/lexer.go index a38fcbaf..8b3176c9 100644 --- a/internal/common/lexer.go +++ b/internal/common/lexer.go @@ -2,6 +2,7 @@ package common import ( "fmt" + "strconv" "strings" "text/scanner" @@ -11,9 +12,10 @@ import ( type syntaxError string type Lexer struct { - sc *scanner.Scanner - next rune - descComment string + sc *scanner.Scanner + next rune + descComment string + useStringDescriptions bool } type Ident struct { @@ -21,13 +23,13 @@ type Ident struct { Loc errors.Location } -func NewLexer(s string) *Lexer { +func NewLexer(s string, useStringDescriptions bool) *Lexer { sc := &scanner.Scanner{ Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings, } sc.Init(strings.NewReader(s)) - return &Lexer{sc: sc} + return &Lexer{sc: sc, useStringDescriptions: useStringDescriptions} } func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) { @@ -50,13 +52,15 @@ func (l *Lexer) Peek() rune { return l.next } -// Consume whitespace and tokens equivalent to whitespace (e.g. commas and comments). +// ConsumeWhitespace consumes whitespace and tokens equivalent to whitespace (e.g. commas and comments). // // Consumed comment characters will build the description for the next type or field encountered. -// The description is available from `DescComment()`, and will be reset every time `Consume()` is -// executed. -func (l *Lexer) Consume() { - l.descComment = "" +// The description is available from `DescComment()`, and will be reset every time `ConsumeWhitespace()` is +// executed unless l.useStringDescriptions is set. +func (l *Lexer) ConsumeWhitespace() { + if !l.useStringDescriptions { + l.descComment = "" + } for { l.next = l.sc.Scan() @@ -84,6 +88,31 @@ func (l *Lexer) Consume() { } } +// consumeDescription optionally consumes a description based on the June 2018 graphql spec if any are present. 
+// +// Single quote strings are also single line. Triple quote strings can be multi-line. Triple quote strings are +// whitespace trimmed on both ends. +// If a description is found, consume any following comments as well. +// +// http://facebook.github.io/graphql/June2018/#sec-Descriptions +func (l *Lexer) consumeDescription() bool { + // If the next token is not a string, we don't consume it + if l.next == scanner.String { + // a triple quote string is an empty "string" followed by an open quote due to the way the parser treats strings as one token + l.descComment = "" + tokenText := l.sc.TokenText() + if l.sc.Peek() == '"' { + // Consume the third quote + l.next = l.sc.Next() + l.consumeTripleQuoteComment() + } else { + l.consumeStringComment(tokenText) + } + return true + } + return false +} + func (l *Lexer) ConsumeIdent() string { name := l.sc.TokenText() l.ConsumeToken(scanner.Ident) @@ -101,12 +130,12 @@ func (l *Lexer) ConsumeKeyword(keyword string) { if l.next != scanner.Ident || l.sc.TokenText() != keyword { l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword)) } - l.Consume() + l.ConsumeWhitespace() } func (l *Lexer) ConsumeLiteral() *BasicLit { lit := &BasicLit{Type: l.next, Text: l.sc.TokenText()} - l.Consume() + l.ConsumeWhitespace() return lit } @@ -114,10 +143,15 @@ func (l *Lexer) ConsumeToken(expected rune) { if l.next != expected { l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected))) } - l.Consume() + l.ConsumeWhitespace() } func (l *Lexer) DescComment() string { + if l.useStringDescriptions { + if l.consumeDescription() { + l.ConsumeWhitespace() + } + } return l.descComment } @@ -132,11 +166,41 @@ func (l *Lexer) Location() errors.Location { } } +func (l *Lexer) consumeTripleQuoteComment() { + if l.next != '"' { + panic("consumeTripleQuoteComment used in wrong context: no third quote?") + } + + var comment string + var numQuotes int + for { + l.next = l.sc.Next() + 
if l.next == '"' { + numQuotes++ + } else { + numQuotes = 0 + } + comment += string(l.next) + if numQuotes == 3 || l.next == scanner.EOF { + break + } + } + l.descComment += strings.TrimSpace(comment[:len(comment)-numQuotes]) +} + +func (l *Lexer) consumeStringComment(str string) { + value, err := strconv.Unquote(str) + if err != nil { + panic(err) + } + l.descComment += value +} + // consumeComment consumes all characters from `#` to the first encountered line terminator. // The characters are appended to `l.descComment`. func (l *Lexer) consumeComment() { if l.next != '#' { - return + panic("consumeComment used in wrong context") } // TODO: count and trim whitespace so we can dedent any following lines. @@ -144,7 +208,7 @@ l.sc.Next() } - if l.descComment != "" { + if l.descComment != "" && !l.useStringDescriptions { // TODO: use a bytes.Buffer or strings.Builder instead of this. l.descComment += "\n" } @@ -155,7 +219,9 @@ break } - // TODO: use a bytes.Buffer or strings.Build instead of this. - l.descComment += string(next) + if !l.useStringDescriptions { + // TODO: use a bytes.Buffer or strings.Builder instead of this. 
+ l.descComment += string(next) + } } } diff --git a/internal/common/lexer_test.go b/internal/common/lexer_test.go index 4f811f7f..40e967ed 100644 --- a/internal/common/lexer_test.go +++ b/internal/common/lexer_test.go @@ -7,32 +7,84 @@ import ( ) type consumeTestCase struct { - description string - definition string - expected string // expected description + description string + definition string + expected string // expected description + failureExpected bool + useStringDescriptions bool } +// Note that these tests stop as soon as they parse the comments, so even though the rest of the file will fail to parse sometimes, the tests still pass var consumeTests = []consumeTestCase{{ - description: "initial test", + description: "no string descriptions allowed in old mode", definition: ` # Comment line 1 -# Comment line 2 +#Comment line 2 ,,,,,, # Commas are insignificant +"New style comments" type Hello { world: String! }`, - expected: "Comment line 1\nComment line 2\nCommas are insignificant", + expected: "Comment line 1\nComment line 2\nCommas are insignificant", + useStringDescriptions: false, +}, { + description: "simple string descriptions allowed in new mode", + definition: ` + +# Comment line 1 +#Comment line 2 +,,,,,, # Commas are insignificant +"New style comments" +type Hello { + world: String! +}`, + expected: "New style comments", + useStringDescriptions: true, +}, { + description: "comment after description works", + definition: ` + +# Comment line 1 +#Comment line 2 +,,,,,, # Commas are insignificant +type Hello { + world: String! +}`, + expected: "", + useStringDescriptions: true, +}, { + description: "triple quote descriptions allowed in new mode", + definition: ` + +# Comment line 1 +#Comment line 2 +,,,,,, # Commas are insignificant +""" +New style comments +Another line +""" +type Hello { + world: String! 
+}`, + expected: "New style comments\nAnother line", + useStringDescriptions: true, }} func TestConsume(t *testing.T) { for _, test := range consumeTests { t.Run(test.description, func(t *testing.T) { - lex := common.NewLexer(test.definition) + lex := common.NewLexer(test.definition, test.useStringDescriptions) - err := lex.CatchSyntaxError(lex.Consume) - if err != nil { - t.Fatal(err) + err := lex.CatchSyntaxError(func() { lex.ConsumeWhitespace() }) + if test.failureExpected { + if err == nil { + t.Fatalf("schema should have been invalid; comment: %s", lex.DescComment()) + } + } else { + if err != nil { + t.Fatal(err) + } } if test.expected != lex.DescComment() { diff --git a/internal/query/query.go b/internal/query/query.go index faba4d2a..fffc88e7 100644 --- a/internal/query/query.go +++ b/internal/query/query.go @@ -94,7 +94,7 @@ func (InlineFragment) isSelection() {} func (FragmentSpread) isSelection() {} func Parse(queryString string) (*Document, *errors.QueryError) { - l := common.NewLexer(queryString) + l := common.NewLexer(queryString, false) var doc *Document err := l.CatchSyntaxError(func() { doc = parseDocument(l) }) @@ -107,7 +107,7 @@ func Parse(queryString string) (*Document, *errors.QueryError) { func parseDocument(l *common.Lexer) *Document { d := &Document{} - l.Consume() + l.ConsumeWhitespace() for l.Peek() != scanner.EOF { if l.Peek() == '{' { op := &Operation{Type: Query, Loc: l.Location()} diff --git a/internal/schema/meta.go b/internal/schema/meta.go index b48bf7ac..365e740a 100644 --- a/internal/schema/meta.go +++ b/internal/schema/meta.go @@ -5,7 +5,7 @@ var Meta *Schema func init() { Meta = &Schema{} // bootstrap Meta = New() - if err := Meta.Parse(metaSrc); err != nil { + if err := Meta.Parse(metaSrc, false); err != nil { panic(err) } } @@ -167,7 +167,7 @@ var metaSrc = ` inputFields: [__InputValue!] ofType: __Type } - + # An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is. 
enum __TypeKind { # Indicates this type is a scalar. diff --git a/internal/schema/schema.go b/internal/schema/schema.go index e549f17c..569b26b2 100644 --- a/internal/schema/schema.go +++ b/internal/schema/schema.go @@ -246,8 +246,8 @@ func New() *Schema { } // Parse the schema string. -func (s *Schema) Parse(schemaString string) error { - l := common.NewLexer(schemaString) +func (s *Schema) Parse(schemaString string, useStringDescriptions bool) error { + l := common.NewLexer(schemaString, useStringDescriptions) err := l.CatchSyntaxError(func() { parseSchema(s, l) }) if err != nil { @@ -389,7 +389,7 @@ func resolveInputObject(s *Schema, values common.InputValueList) error { } func parseSchema(s *Schema, l *common.Lexer) { - l.Consume() + l.ConsumeWhitespace() for l.Peek() != scanner.EOF { desc := l.DescComment() diff --git a/internal/schema/schema_internal_test.go b/internal/schema/schema_internal_test.go index 9159eeec..d652f5d5 100644 --- a/internal/schema/schema_internal_test.go +++ b/internal/schema/schema_internal_test.go @@ -18,7 +18,7 @@ func TestParseInterfaceDef(t *testing.T) { tests := []testCase{{ description: "Parses simple interface", definition: "Greeting { field: String }", - expected: &Interface{Name: "Greeting", Fields: []*Field{&Field{Name: "field"}}}, + expected: &Interface{Name: "Greeting", Fields: []*Field{{Name: "field"}}}, }} for _, test := range tests { @@ -158,8 +158,8 @@ func compareObjects(t *testing.T, expected, actual *Object) { func setup(t *testing.T, def string) *common.Lexer { t.Helper() - lex := common.NewLexer(def) - lex.Consume() + lex := common.NewLexer(def, false) + lex.ConsumeWhitespace() return lex } diff --git a/internal/schema/schema_test.go b/internal/schema/schema_test.go index e656fabf..5ee5156d 100644 --- a/internal/schema/schema_test.go +++ b/internal/schema/schema_test.go @@ -80,7 +80,7 @@ func TestParse(t *testing.T) { t.Skip("TODO: add support for descriptions") schema := setup(t) - err := schema.Parse(test.sdl) + 
err := schema.Parse(test.sdl, false) if err != nil { t.Fatal(err) } diff --git a/internal/validation/validate_max_depth_test.go b/internal/validation/validate_max_depth_test.go index 9fa74b0b..4dc13e66 100644 --- a/internal/validation/validate_max_depth_test.go +++ b/internal/validation/validate_max_depth_test.go @@ -105,7 +105,7 @@ func (tc maxDepthTestCase) Run(t *testing.T, s *schema.Schema) { func TestMaxDepth(t *testing.T) { s := schema.New() - err := s.Parse(simpleSchema) + err := s.Parse(simpleSchema, false) if err != nil { t.Fatal(err) } @@ -181,7 +181,7 @@ func TestMaxDepth(t *testing.T) { func TestMaxDepthInlineFragments(t *testing.T) { s := schema.New() - err := s.Parse(interfaceSimple) + err := s.Parse(interfaceSimple, false) if err != nil { t.Fatal(err) } @@ -230,7 +230,7 @@ func TestMaxDepthInlineFragments(t *testing.T) { func TestMaxDepthFragmentSpreads(t *testing.T) { s := schema.New() - err := s.Parse(interfaceSimple) + err := s.Parse(interfaceSimple, false) if err != nil { t.Fatal(err) } @@ -317,7 +317,7 @@ func TestMaxDepthFragmentSpreads(t *testing.T) { func TestMaxDepthUnknownFragmentSpreads(t *testing.T) { s := schema.New() - err := s.Parse(interfaceSimple) + err := s.Parse(interfaceSimple, false) if err != nil { t.Fatal(err) } @@ -352,7 +352,7 @@ func TestMaxDepthUnknownFragmentSpreads(t *testing.T) { func TestMaxDepthValidation(t *testing.T) { s := schema.New() - err := s.Parse(interfaceSimple) + err := s.Parse(interfaceSimple, false) if err != nil { t.Fatal(err) } diff --git a/internal/validation/validation_test.go b/internal/validation/validation_test.go index a2bf6141..52b6f2c6 100644 --- a/internal/validation/validation_test.go +++ b/internal/validation/validation_test.go @@ -39,7 +39,7 @@ func TestValidate(t *testing.T) { schemas := make([]*schema.Schema, len(testData.Schemas)) for i, schemaStr := range testData.Schemas { schemas[i] = schema.New() - if err := schemas[i].Parse(schemaStr); err != nil { + if err := 
schemas[i].Parse(schemaStr, false); err != nil { t.Fatal(err) } }