Mirror of https://github.com/kemko/liquid.git (synced 2026-01-01 15:55:40 +03:00)
Remove the Token class from the lexer in favour of less smart but faster arrays
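The motivation is allocation cost: the lexer previously built a full Token object for every token it emitted, while a two-element array literal is cheaper to allocate and compares structurally for free. A rough micro-benchmark sketch of the trade-off (the Struct below is a hypothetical stand-in for the removed class, not code from this commit; absolute numbers will vary by Ruby version and machine):

    require 'benchmark'

    # Hypothetical stand-in for the removed Liquid::Token class.
    Token = Struct.new(:type, :contents)

    n = 1_000_000
    Benchmark.bm(7) do |x|
      # Old style: one object allocation per token, accessor dispatch to read.
      x.report('Token') { n.times { t = Token.new(:id, 'hi'); t.type } }
      # New style: a plain array literal; fields are read positionally.
      x.report('Array') { n.times { t = [:id, 'hi']; t[0] } }
    end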
@@ -1,30 +1,4 @@
 module Liquid
-  class Token
-    attr_accessor :type, :contents
-    def initialize(*args)
-      @type, @contents = args
-    end
-
-    def self.[](*args)
-      Token.new(*args)
-    end
-
-    def inspect
-      out = "<#{@type}"
-      out << ": \'#{@contents}\'" if contents
-      out << '>'
-    end
-
-    def to_s
-      self.inspect
-    end
-
-    def ==(other)
-      return unless other && other.respond_to?(:type) && other.respond_to?(:contents)
-      @type == other.type && @contents == other.contents
-    end
-  end
-
   class Lexer
     SPECIALS = {
       '|' => :pipe,
@@ -51,7 +25,7 @@ module Liquid
       loop do
         tok = next_token
         unless tok
-          @output << Token[:end_of_string]
+          @output << [:end_of_string]
          return @output
        end
        @output << tok
@@ -59,16 +33,16 @@ module Liquid
     end

     def next_token
-      consume_whitespace
+      @ss.skip(/\s*/)
       return if @ss.eos?

       case
-      when t = @ss.scan(COMPARISON_OPERATOR) then Token[:comparison, t]
-      when t = @ss.scan(SINGLE_STRING_LITERAL) then Token[:string, t]
-      when t = @ss.scan(DOUBLE_STRING_LITERAL) then Token[:string, t]
-      when t = @ss.scan(FLOAT_LITERAL) then Token[:float, t]
-      when t = @ss.scan(INTEGER_LITERAL) then Token[:integer, t]
-      when t = @ss.scan(IDENTIFIER) then Token[:id, t]
+      when t = @ss.scan(COMPARISON_OPERATOR) then [:comparison, t]
+      when t = @ss.scan(SINGLE_STRING_LITERAL) then [:string, t]
+      when t = @ss.scan(DOUBLE_STRING_LITERAL) then [:string, t]
+      when t = @ss.scan(FLOAT_LITERAL) then [:float, t]
+      when t = @ss.scan(INTEGER_LITERAL) then [:integer, t]
+      when t = @ss.scan(IDENTIFIER) then [:id, t]
       else
         lex_specials
       end
@@ -78,14 +52,10 @@ module Liquid
     def lex_specials
       c = @ss.getch
       if s = SPECIALS[c]
-        return Token[s,c]
+        return [s,c]
       end

       raise SyntaxError, "Unexpected character #{c}."
     end
-
-    def consume_whitespace
-      @ss.skip(/\s*/)
-    end
   end
 end
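Beyond dropping the Token class, the lexer picks up two smaller wins here: the consume_whitespace helper is inlined as a direct @ss.skip(/\s*/) call, saving a method dispatch per token, and the end-of-stream sentinel shrinks to a one-element array. @ss is Ruby's StringScanner; a short sketch of how it drives the scan loop (hypothetical input and patterns, not code from the commit):

    require 'strscan'

    ss = StringScanner.new('hi 5.0')
    ss.skip(/\s*/)       # consume leading whitespace; returns chars skipped
    ss.scan(/[a-z]+/)    # => "hi"
    ss.skip(/\s*/)
    ss.scan(/\d+\.\d+/)  # => "5.0"
    ss.eos?              # => true, so next_token returns nil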
@@ -14,11 +14,11 @@ module Liquid

     def consume(type = nil)
       token = @tokens[@p]
-      if type && token.type != type
+      if type && token[0] != type
         raise SyntaxError, "Expected #{type} but found #{@tokens[@p]}"
       end
       @p += 1
-      token.contents
+      token[1]
     end

     # Only consumes the token if it matches the type
@@ -26,35 +26,35 @@ module Liquid
     # or false otherwise.
     def consume?(type)
       token = @tokens[@p]
-      return false unless token && token.type == type
+      return false unless token && token[0] == type
       @p += 1
-      token.contents
+      token[1]
     end

     # Like consume? Except for an :id token of a certain name
     def id?(str)
       token = @tokens[@p]
-      return false unless token && token.type == :id
-      return false unless token.contents == str
+      return false unless token && token[0] == :id
+      return false unless token[1] == str
       @p += 1
-      token.contents
+      token[1]
     end

     def look(type, ahead = 0)
       tok = @tokens[@p + ahead]
       return false unless tok
-      tok.type == type
+      tok[0] == type
     end

     # === General Liquid parsing functions ===

     def expression
       token = @tokens[@p]
-      if token.type == :id
+      if token[0] == :id
         variable_signature
-      elsif [:string, :integer, :float].include? token.type
+      elsif [:string, :integer, :float].include? token[0]
         consume
-        token.contents
+        token[1]
       else
         raise SyntaxError, "#{token} is not a valid expression."
       end
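With tokens as plain arrays, the parser reads fields positionally: token[0] is the token type and token[1] its contents, replacing the old token.type / token.contents accessors. A tiny sketch of the access pattern (hypothetical values, not code from the commit):

    token = [:id, 'hi']
    token[0] == :id         # => true; this is what consume?, id? and look test
    token[1]                # => "hi"; what consume returns to the caller
    type, contents = token  # destructuring works too: :id, "hi"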
@@ -11,7 +11,7 @@ results = profiler.run_profile
 puts 'Success'
 puts

-[RubyProf::FlatPrinter, RubyProf::GraphPrinter, RubyProf::GraphHtmlPrinter, RubyProf::CallTreePrinter].each do |klass|
+[RubyProf::FlatPrinter, RubyProf::GraphHtmlPrinter, RubyProf::CallTreePrinter, RubyProf::DotPrinter].each do |klass|
   filename = (ENV['TMP'] || '/tmp') + (klass.name.include?('Html') ? "/liquid.#{klass.name.downcase}.html" : "/callgrind.liquid.#{klass.name.downcase}.txt")
   filename.gsub!(/:+/, '_')
   File.open(filename, "w+") { |fp| klass.new(results).print(fp, :print_file => true) }
@@ -5,39 +5,39 @@ class LexerTest < Test::Unit::TestCase

   def test_strings
     tokens = Lexer.new(%! 'this is a test""' "wat 'lol'"!).tokenize
-    assert_equal [Token[:string,%!'this is a test""'!], Token[:string, %!"wat 'lol'"!], Token[:end_of_string]], tokens
+    assert_equal [[:string,%!'this is a test""'!], [:string, %!"wat 'lol'"!], [:end_of_string]], tokens
   end

   def test_integer
     tokens = Lexer.new('hi 50').tokenize
-    assert_equal [Token[:id,'hi'], Token[:integer, '50'], Token[:end_of_string]], tokens
+    assert_equal [[:id,'hi'], [:integer, '50'], [:end_of_string]], tokens
   end

   def test_float
     tokens = Lexer.new('hi 5.0').tokenize
-    assert_equal [Token[:id,'hi'], Token[:float, '5.0'], Token[:end_of_string]], tokens
+    assert_equal [[:id,'hi'], [:float, '5.0'], [:end_of_string]], tokens
   end

   def test_comparison
     tokens = Lexer.new('== <> contains').tokenize
-    assert_equal [Token[:comparison,'=='], Token[:comparison, '<>'], Token[:comparison, 'contains'], Token[:end_of_string]], tokens
+    assert_equal [[:comparison,'=='], [:comparison, '<>'], [:comparison, 'contains'], [:end_of_string]], tokens
   end

   def test_specials
     tokens = Lexer.new('| .:').tokenize
-    assert_equal [Token[:pipe, '|'], Token[:dot, '.'], Token[:colon, ':'], Token[:end_of_string]], tokens
+    assert_equal [[:pipe, '|'], [:dot, '.'], [:colon, ':'], [:end_of_string]], tokens
     tokens = Lexer.new('[,]').tokenize
-    assert_equal [Token[:open_square, '['], Token[:comma, ','], Token[:close_square, ']'], Token[:end_of_string]], tokens
+    assert_equal [[:open_square, '['], [:comma, ','], [:close_square, ']'], [:end_of_string]], tokens
   end

   def test_fancy_identifiers
     tokens = Lexer.new('hi! five?').tokenize
-    assert_equal [Token[:id,'hi!'], Token[:id, 'five?'], Token[:end_of_string]], tokens
+    assert_equal [[:id,'hi!'], [:id, 'five?'], [:end_of_string]], tokens
   end

   def test_whitespace
     tokens = Lexer.new("five|\n\t ==").tokenize
-    assert_equal [Token[:id,'five'], Token[:pipe, '|'], Token[:comparison, '=='], Token[:end_of_string]], tokens
+    assert_equal [[:id,'five'], [:pipe, '|'], [:comparison, '=='], [:end_of_string]], tokens
   end

   def test_unexpected_character
@@ -48,8 +48,8 @@ class LexerTest < Test::Unit::TestCase

   def test_next_token
     l = Lexer.new('hi 5.0')
-    assert_equal Token[:id, 'hi'], l.next_token
-    assert_equal Token[:float, '5.0'], l.next_token
+    assert_equal [:id, 'hi'], l.next_token
+    assert_equal [:float, '5.0'], l.next_token
     assert_nil l.next_token
   end
 end
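A side benefit visible in these tests: arrays come with element-wise equality built in, which is why the hand-written Token#== and the Token[...] constructor shorthand could be deleted outright and the assertions now compare plain array literals. For example (hypothetical values):

    [:id, 'hi'] == [:id, 'hi']      # => true
    [:id, 'hi'] == [:float, '5.0']  # => false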