Maintain a list of tokens.
# File lib/puppet/parser/lexer.rb, line 89
def initialize
  @tokens = {}
  @regex_tokens = []
  @string_tokens = []
  @tokens_by_string = {}
end
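A minimal usage sketch, assuming a Puppet 2.x source tree where require 'puppet/parser/lexer' loads this file and the class is reachable as Puppet::Parser::Lexer::TokenList: a fresh list is just the four empty structures, with @tokens as the canonical registry and the other three serving as matching accelerators.

  require 'puppet/parser/lexer'

  # @tokens maps names to Token objects; the string/regex buckets and
  # @tokens_by_string exist only to speed up matching and lookup.
  list = Puppet::Parser::Lexer::TokenList.new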
Create a new token.
# File lib/puppet/parser/lexer.rb, line 73
def add_token(name, regex, options = {}, &block)
  raise(ArgumentError, "Token #{name} already exists") if @tokens.include?(name)
  token = Token.new(regex, name, options)
  @tokens[token.name] = token

  if token.string
    @string_tokens << token
    @tokens_by_string[token.string] = token
  else
    @regex_tokens << token
  end

  token.meta_def(:convert, &block) if block_given?

  token
end
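A sketch of the two registration paths (the token names here are illustrative, not necessarily ones Puppet defines): a Regexp lands in the regex bucket, a literal String additionally becomes reachable through lookup, and an optional block is grafted onto the token as its convert method via meta_def.

  # Regex-matched token: stored in @regex_tokens.
  list.add_token :NUMBER, %r{\d+}

  # Literal-string token: stored in @string_tokens and @tokens_by_string.
  list.add_token :FARROW, '=>'

  # The block becomes the token's convert method; inside it, self is
  # the token object itself.
  list.add_token :NAME, %r{[a-z]\w*} do |lexer, value|
    [self, value]
  end

Note that add_token raises ArgumentError on a duplicate name, so each name can be registered only once per list.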
Define several tokens at once, from a hash of pattern => name pairs.
# File lib/puppet/parser/lexer.rb, line 102
def add_tokens(hash)
  hash.each do |regex, name|
    add_token(name, regex)
  end
end
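For example (a sketch continuing the list above), the bulk form simply loops over add_token; note that the pairs are pattern => name, the reverse of add_token's own argument order.

  list.add_tokens(
    '{'     => :LBRACE,
    '}'     => :RBRACE,
    %r{\s+} => :WHITESPACE
  )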
Yield each token name and value in turn.
# File lib/puppet/parser/lexer.rb, line 115
def each
  @tokens.each {|name, value| yield name, value }
end
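Since each yields name/token pairs, dumping the registry is a one-liner (continuing the sketch above):

  list.each { |name, token| puts "#{name} => #{token.inspect}" }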
Look up a token by its value, rather than name.
# File lib/puppet/parser/lexer.rb, line 97
def lookup(string)
  @tokens_by_string[string]
end
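Continuing the sketch, lookup keys on the literal string rather than the token name, and returns nil for strings that were never registered:

  list.lookup('=>')   # => the :FARROW token added earlier
  list.lookup('<=>')  # => nil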
Sort our string tokens by length, longest first, so the first match found while scanning is the longest possible match and we can stop immediately. This helps us avoid the O(n^2) nature of token matching.
# File lib/puppet/parser/lexer.rb, line 110
def sort_tokens
  @string_tokens.sort! { |a, b| b.string.length <=> a.string.length }
end
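A standalone sketch of the effect (string_tokens is assumed to be exposed by an attr_reader elsewhere in the class; the operators are Puppet's own but are used here purely for illustration):

  list = Puppet::Parser::Lexer::TokenList.new
  list.add_tokens '<' => :LESSTHAN, '<<' => :LSHIFT, '<<|' => :LLCOLLECT
  list.sort_tokens

  # Longest literals now come first, so a left-to-right scan can stop
  # at its first hit: '<<|' is tried before '<<', which is tried
  # before '<'.
  list.string_tokens.map { |t| t.string }  # => ["<<|", "<<", "<"]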