@@ -57,6 +57,26 @@ def [](byteindex)
57
57
end
58
58
end
59
59
60
# This represents all of the tokens coming back from the lexer. It is
# replacing a simple array because it keeps track of the last deleted token
# from the list for better error messages.
class TokenList < SimpleDelegator
  # [Token | nil] the most recent token removed through #delete or
  # #delete_at, kept around so error reporting can point at it.
  attr_reader :last_deleted

  def initialize(object)
    super
    @last_deleted = nil
  end

  # Removes the first occurrence of +token+ from the underlying list and
  # returns it. When the token was actually present we remember it;
  # otherwise we hold on to whatever was remembered before.
  def delete(token)
    @last_deleted = super || @last_deleted
  end

  # Removes the entry at +position+ from the underlying list, remembering
  # and returning it (nil when the index was out of range).
  def delete_at(position)
    @last_deleted = super
  end
end
79
+
60
80
# [String] the source being parsed
61
81
attr_reader :source
62
82
@@ -124,7 +144,7 @@ def initialize(source, *)
124
144
# Most of the time, when a parser event consumes one of these events, it
125
145
# will be deleted from the list. So ideally, this list stays pretty short
126
146
# over the course of parsing a source string.
127
- @tokens = [ ]
147
+ @tokens = TokenList . new ( [ ] )
128
148
129
149
# Here we're going to build up a list of SingleByteString or
130
150
# MultiByteString objects. They're each going to represent a string in the
@@ -174,6 +194,33 @@ def current_column
174
194
line [ column ] . to_i - line . start
175
195
end
176
196
197
# Returns the current location that is being looked at for the parser for
# the purpose of locating the error, as a [lineno, column] pair.
#
# NOTE: the locals computed from a location object are deliberately named
# start_line (not lineno). Assigning to a local called lineno anywhere in
# this method would make the bareword lineno in the elsif below parse as
# that (possibly nil) local instead of the ripper #lineno method, which
# would silently disable the ripper-state fallback.
def find_token_error(location)
  if location
    # If we explicitly passed a location into this find_token_error method,
    # that means that's the source of the error, so we'll use that
    # information for our error object.
    start_line = location.start_line
    [start_line, location.start_char - line_counts[start_line - 1].start]
  elsif lineno && column
    # If there is a line number associated with the current ripper state,
    # then we'll use that information to generate the error.
    [lineno, column]
  elsif (location = tokens.last_deleted&.location)
    # If we've already deleted a token from the list of tokens that we are
    # consuming, then we'll fall back to that token's location.
    start_line = location.start_line
    [start_line, location.start_char - line_counts[start_line - 1].start]
  else
    # Finally, it's possible that when we hit this error the parsing thread
    # for ripper has died. In that case, lineno and column both return nil.
    # So we're just going to set it to line 1, column 0 in the hopes that
    # that makes any sense.
    [1, 0]
  end
end
223
+
177
224
# As we build up a list of tokens, we'll periodically need to go backwards
178
225
# and find the ones that we've already hit in order to determine the
179
226
# location information for nodes that use them. For example, if you have a
@@ -201,14 +248,7 @@ def find_token(type, value = :any, consume: true, location: nil)
201
248
unless index
202
249
token = value == :any ? type . name . split ( "::" , 2 ) . last : value
203
250
message = "Cannot find expected #{ token } "
204
-
205
- if location
206
- lineno = location . start_line
207
- column = location . start_char - line_counts [ lineno - 1 ] . start
208
- raise ParseError . new ( message , lineno , column )
209
- else
210
- raise ParseError . new ( message , lineno , column )
211
- end
251
+ raise ParseError . new ( message , *find_token_error ( location ) )
212
252
end
213
253
214
254
tokens . delete_at ( index )
0 commit comments