Compare commits

...

31 Commits

Author SHA1 Message Date
Isha
57fdbe81b8 filter args 2014-03-03 03:38:19 +00:00
Isha
331cf067b1 filter names no args 2014-03-03 02:53:12 +00:00
Isha
bcb3f831b6 get_quoted_fragment 2014-03-03 01:05:57 +00:00
Isha
16d5f40474 wip 2014-03-03 01:02:18 +00:00
Isha
02525f258e extract name 2014-03-02 23:24:31 +00:00
Isha
a119e86fd0 remove old regexy lax_parse 2014-02-28 20:07:21 +00:00
Isha
111f24aad1 Use isspace
Thx Dylan
2014-02-28 20:05:29 +00:00
Isha
d0ed4711b2 try not using regexes at all, because they are evil 2014-02-28 19:31:52 +00:00
Isha
f367dd7915 don't really need those I suppose 2014-02-28 12:35:15 -05:00
Isha
edd28a144a Comment out lax_parse test cases for now 2014-02-28 12:34:46 -05:00
Isha
8669029afd Comment out verbose log message 2014-02-28 12:33:48 -05:00
Isha
8f42e50959 wip 2014-02-28 12:24:37 -05:00
Isha
25cc69c3c0 add variable in c 2014-02-28 12:23:26 -05:00
Dylan Thacker-Smith
03d586aafe Add convenience methods for getting a struct from a ruby object.
If we are trying to get the struct from something other than self, then we
should make sure to check the class of the object. These util functions
make this easier.
2014-02-28 10:08:55 -05:00
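A rough sketch of the pattern this commit describes, leaning on the obj_get_data_ptr/check_class helpers and the LIQUID_TOKENIZER_GET_STRUCT macro added in ext/liquid/utils.c and ext/liquid/tokenizer.h below (the example_peek method itself is hypothetical):

#include "liquid_ext.h"

/* A C-implemented method that receives a Tokenizer as an argument rather than
 * as self, so its class must be verified before DATA_PTR is dereferenced.
 * LIQUID_TOKENIZER_GET_STRUCT(obj) expands to obj_get_data_ptr(obj, cLiquidTokenizer),
 * which calls check_class() and raises a TypeError on a mismatch. */
static VALUE example_peek(VALUE self, VALUE tokenizerObj)
{
    struct liquid_tokenizer *tokenizer = LIQUID_TOKENIZER_GET_STRUCT(tokenizerObj);

    /* For self, whose class is guaranteed by where the method is defined,
     * a plain Data_Get_Struct(self, struct liquid_tokenizer, tokenizer) suffices. */
    return INT2NUM(tokenizer->length);
}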
Dylan Thacker-Smith
dc8a34a52f Implement Block#parse_body in C. 2014-02-28 07:47:36 -05:00
Dylan Thacker-Smith
99cebf74bc Rename Block#parse to parse_body since that is how it is being used. 2014-02-27 23:16:11 -05:00
Dylan Thacker-Smith
7eb64886dc Move the parse method out of Tag, only blocks need the body parsed.
The parse method should be renamed to something like parse_body,
since that is how it is used, and no non-block tags were using the
parse method.
2014-02-27 22:31:09 -05:00
Dylan Thacker-Smith
f89046e81f Use super rather than render_all in single block render classes. 2014-02-27 21:38:49 -05:00
Dylan Thacker-Smith
9ee4573ef4 Avoid keeping track of two lists of nodes during parsing. 2014-02-27 20:51:05 -05:00
Dylan Thacker-Smith
a48b4f47f6 Return nil in Document#block_delimiter rather than an empty array.
The block delimiter is normally a string, so nil makes more sense when
there is no delimiter. We also don't want to allocate an array for no
reason.
2014-02-27 20:06:57 -05:00
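The C parse loop added in ext/liquid/block.c below only treats the delimiter as meaningful when it is a String, which is why nil is the natural "no delimiter" value here. A minimal sketch of that check (the is_block_delimiter helper is illustrative, not part of the diff):

#include <string.h>
#include "liquid_ext.h"

/* Returns true when the tag name matches self's block_delimiter. Because the
 * delimiter is gated on TYPE(...) == T_STRING, the nil returned by
 * Document#block_delimiter simply fails the check; no throwaway Array is needed. */
static bool is_block_delimiter(VALUE self, const char *tag_name, long tag_name_length)
{
    VALUE block_delimiter = rb_funcall(self, rb_intern("block_delimiter"), 0);
    return TYPE(block_delimiter) == T_STRING &&
           tag_name_length == RSTRING_LEN(block_delimiter) &&
           !memcmp(tag_name, RSTRING_PTR(block_delimiter), tag_name_length);
}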
Dylan Thacker-Smith
72d402837e Remove unused Block#end_tag method.
Although the method is called, it is defined with an empty body and not
overridden to do anything else.
2014-02-27 18:53:18 -05:00
Dylan Thacker-Smith
06bef40527 Fix a missing return warning. 2014-02-27 18:47:55 -05:00
Dylan Thacker-Smith
a48b245e6e Turn on C compiler warnings. 2014-02-27 18:45:57 -05:00
Dylan Thacker-Smith
d4aabda625 Avoid freeing of uninitialized memory.
Thanks to Isha for pointing this out.
2014-02-27 18:32:19 -05:00
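The compare view does not show the pre-fix code path, but the allocator pattern in ext/liquid/tokenizer.c below illustrates the defence: Data_Make_Struct zero-fills the wrapped struct, so the free function is safe to run even if the object is reclaimed before #initialize ever populates it. A sketch of that pattern (example_allocate/example_free are illustrative names):

#include "liquid_ext.h"

static void example_free(void *ptr)
{
    xfree(ptr);
}

static VALUE example_allocate(VALUE klass)
{
    struct liquid_tokenizer *tokenizer;
    /* Data_Make_Struct zeroes the struct, so cursor and length start out as
     * NULL/0 rather than whatever garbage malloc left behind. */
    VALUE obj = Data_Make_Struct(klass, struct liquid_tokenizer, NULL, example_free, tokenizer);
    return obj;
}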
Dylan Thacker-Smith
dab6bdfdee Make sure the ext directory is included in the distributed gem. 2014-02-27 15:50:43 -05:00
Dylan Thacker-Smith
8c075fca1f Remove a couple FIXME comments which are only partially a lie.
I added those comments before creating an invalid token type to return the
error. However, we still aren't making use of the token type.
2014-02-27 15:21:57 -05:00
Dylan Thacker-Smith
ea8406e36e Create a Liquid::Tokenizer class in the C extension. 2014-02-27 15:20:22 -05:00
Dylan Thacker-Smith
8bb3bca64a Require the liquid extension when liquid is required. 2014-02-27 14:22:18 -05:00
Dylan Thacker-Smith
5de1082201 Add profile:stackprof rake task. 2014-02-27 11:20:49 -05:00
Dylan Thacker-Smith
7ba02d2811 Use start and end of string rather than line matching in regexes. 2014-02-27 10:07:04 -05:00
Dylan Thacker-Smith
2066676bf4 Add a C extension that doesn't yet do anything. 2014-02-27 09:58:33 -05:00
35 changed files with 733 additions and 155 deletions

3
.gitignore vendored

@@ -5,3 +5,6 @@ pkg
*.rbc
.rvmrc
.ruby-version
*.bundle
/tmp
Gemfile.lock

3
Gemfile Normal file

@@ -0,0 +1,3 @@
source 'https://rubygems.org'
gemspec

View File

@@ -1,5 +1,6 @@
require 'rake'
require 'rake/testtask'
require 'rake/extensiontask'
$LOAD_PATH.unshift File.expand_path("../lib", __FILE__)
require "liquid/version"
@@ -64,6 +65,10 @@ namespace :profile do
ruby "./performance/profile.rb"
end
task :stackprof do
ruby "./performance/stackprof.rb"
end
desc "Run KCacheGrind"
task :grind => :run do
system "qcachegrind /tmp/liquid.rubyprof_calltreeprinter.txt"
@@ -75,3 +80,8 @@ desc "Run example"
task :example do
ruby "-w -d -Ilib example/server/server.rb"
end
Rake::ExtensionTask.new "liquid" do |ext|
ext.lib_dir = "lib/liquid"
end
Rake::Task[:test].prerequisites << :compile

View File

@@ -13,7 +13,7 @@ class LiquidServlet < WEBrick::HTTPServlet::AbstractServlet
def handle(type, req, res)
@request, @response = req, res
@request.path_info =~ /(\w+)$/
@request.path_info =~ /(\w+)\z/
@action = $1 || 'index'
@assigns = send(@action) if respond_to?(@action)

168
ext/liquid/block.c Normal file

@@ -0,0 +1,168 @@
#include "liquid_ext.h"
VALUE cLiquidBlock;
ID intern_assert_missing_delimitation, intern_block_delimiter, intern_is_blank, intern_new,
intern_new_with_options, intern_tags, intern_unknown_tag, intern_unterminated_tag,
intern_unterminated_variable;
struct liquid_tag
{
char *name, *markup;
long name_length, markup_length;
};
static bool parse_tag(struct liquid_tag *tag, char *token, long token_length)
{
// Strip the surrounding {% and %} delimiters
token += 2;
token_length -= 4;
char *end = token + token_length;
while (token < end && isspace(*token))
token++;
tag->name = token;
char c = *token;
while (token < end && (isalnum(c) || c == '_'))
c = *(++token);
tag->name_length = token - tag->name;
if (!tag->name_length) {
memset(tag, 0, sizeof(*tag));
return false;
}
while (token < end && isspace(*token))
token++;
tag->markup = token;
char *last = end - 1;
while (token < last && isspace(*last))
last--;
end = last + 1;
tag->markup_length = end - token;
return true;
}
static VALUE rb_parse_body(VALUE self, VALUE tokenizerObj)
{
struct liquid_tokenizer *tokenizer = LIQUID_TOKENIZER_GET_STRUCT(tokenizerObj);
bool blank = true;
VALUE nodelist = rb_iv_get(self, "@nodelist");
if (nodelist == Qnil) {
nodelist = rb_ary_new();
rb_iv_set(self, "@nodelist", nodelist);
} else {
rb_ary_clear(nodelist);
}
struct token token;
while (true) {
liquid_tokenizer_next(tokenizer, &token);
switch (token.type) {
case TOKEN_NONE:
/*
* Make sure that it's ok to end parsing in the current block.
* Effectively this method will throw an exception unless the current block is
* of type Document
*/
rb_funcall(self, intern_assert_missing_delimitation, 0);
goto done;
case TOKEN_INVALID:
{
VALUE token_obj = rb_str_new(token.str, token.length);
if (token.str[1] == '%')
rb_funcall(self, intern_unterminated_tag, 1, token_obj);
else
rb_funcall(self, intern_unterminated_variable, 1, token_obj);
break;
}
case TOKEN_TAG:
{
struct liquid_tag tag;
if (!parse_tag(&tag, token.str, token.length)) {
// FIXME: provide more appropriate error message
rb_funcall(self, intern_unterminated_tag, 1, rb_str_new(token.str, token.length));
} else {
if (tag.name_length >= 3 && !memcmp(tag.name, "end", 3)) {
VALUE block_delimiter = rb_funcall(self, intern_block_delimiter, 0);
if (TYPE(block_delimiter) == T_STRING &&
tag.name_length == RSTRING_LEN(block_delimiter) &&
!memcmp(tag.name, RSTRING_PTR(block_delimiter), tag.name_length))
{
goto done;
}
}
VALUE tags = rb_funcall(cLiquidTemplate, intern_tags, 0);
Check_Type(tags, T_HASH);
VALUE tag_name = rb_str_new(tag.name, tag.name_length);
VALUE tag_class = rb_hash_lookup(tags, tag_name);
VALUE markup = rb_str_new(tag.markup, tag.markup_length);
if (tag_class != Qnil) {
VALUE options = rb_iv_get(self, "@options");
if (options == Qnil)
options = rb_hash_new();
VALUE new_tag = rb_funcall(tag_class, intern_new_with_options, 4,
tag_name, markup, tokenizerObj, options);
if (blank) {
VALUE blank_block = rb_funcall(new_tag, intern_is_blank, 0);
if (blank_block == Qnil || blank_block == Qfalse)
blank = false;
}
rb_ary_push(nodelist, new_tag);
} else {
rb_funcall(self, intern_unknown_tag, 3, tag_name, markup, tokenizerObj);
/*
* multi-block tags may store the nodelist in a block array on unknown_tag
* then replace @nodelist with a new array. We need to use the new array
* for the block following the tag token.
*/
nodelist = rb_iv_get(self, "@nodelist");
}
}
break;
}
case TOKEN_VARIABLE:
{
VALUE markup = rb_str_new(token.str + 2, token.length - 4);
VALUE options = rb_iv_get(self, "@options");
VALUE new_var = rb_funcall(cLiquidVariable, intern_new, 2, markup, options);
rb_ary_push(nodelist, new_var);
blank = false;
break;
}
case TOKEN_STRING:
rb_ary_push(nodelist, rb_str_new(token.str, token.length));
if (blank) {
int i;
for (i = 0; i < token.length; i++) {
if (!isspace(token.str[i])) {
blank = false;
break;
}
}
}
break;
}
}
done:
rb_iv_set(self, "@blank", blank ? Qtrue : Qfalse);
return Qnil;
}
void init_liquid_block()
{
intern_assert_missing_delimitation = rb_intern("assert_missing_delimitation!");
intern_block_delimiter = rb_intern("block_delimiter");
intern_is_blank = rb_intern("blank?");
intern_new = rb_intern("new");
intern_new_with_options = rb_intern("new_with_options");
intern_tags = rb_intern("tags");
intern_unknown_tag = rb_intern("unknown_tag");
intern_unterminated_tag = rb_intern("unterminated_tag");
intern_unterminated_variable = rb_intern("unterminated_variable");
cLiquidBlock = rb_define_class_under(mLiquid, "Block", cLiquidTag);
rb_define_method(cLiquidBlock, "parse_body", rb_parse_body, 1);
}

8
ext/liquid/block.h Normal file

@@ -0,0 +1,8 @@
#ifndef LIQUID_BLOCK_H
#define LIQUID_BLOCK_H
void init_liquid_block();
extern VALUE cLiquidBlock;
#endif

3
ext/liquid/extconf.rb Normal file

@@ -0,0 +1,3 @@
require 'mkmf'
$CFLAGS << ' -Wall'
create_makefile("liquid/liquid")

16
ext/liquid/liquid_ext.c Normal file

@@ -0,0 +1,16 @@
#include "liquid_ext.h"
VALUE mLiquid;
VALUE cLiquidTemplate, cLiquidTag, cLiquidVariable;
void Init_liquid(void)
{
mLiquid = rb_define_module("Liquid");
cLiquidTemplate = rb_define_class_under(mLiquid, "Template", rb_cObject);
cLiquidTag = rb_define_class_under(mLiquid, "Tag", rb_cObject);
cLiquidVariable = rb_define_class_under(mLiquid, "Variable", rb_cObject);
init_liquid_tokenizer();
init_liquid_block();
init_liquid_variable();
}

16
ext/liquid/liquid_ext.h Normal file

@@ -0,0 +1,16 @@
#ifndef LIQUID_EXT_H
#define LIQUID_EXT_H
#include <stdbool.h>
#include <ctype.h>
#include <ruby.h>
#include "tokenizer.h"
#include "block.h"
#include "utils.h"
#include "variable.h"
extern VALUE mLiquid;
extern VALUE cLiquidTemplate, cLiquidTag, cLiquidVariable;
#endif

113
ext/liquid/tokenizer.c Normal file

@@ -0,0 +1,113 @@
#include "liquid_ext.h"
VALUE cLiquidTokenizer;
static void free_tokenizer(void *ptr)
{
struct liquid_tokenizer *tokenizer = ptr;
xfree(tokenizer);
}
static VALUE rb_allocate(VALUE klass)
{
VALUE obj;
struct liquid_tokenizer *tokenizer;
obj = Data_Make_Struct(klass, struct liquid_tokenizer, NULL, free_tokenizer, tokenizer);
return obj;
}
static VALUE rb_initialize(VALUE self, VALUE source)
{
struct liquid_tokenizer *tokenizer;
Check_Type(source, T_STRING);
Data_Get_Struct(self, struct liquid_tokenizer, tokenizer);
tokenizer->cursor = RSTRING_PTR(source);
tokenizer->length = RSTRING_LEN(source);
return Qnil;
}
void liquid_tokenizer_next(struct liquid_tokenizer *tokenizer, struct token *token)
{
if (tokenizer->length <= 0) {
memset(token, 0, sizeof(*token));
return;
}
token->type = TOKEN_STRING;
char *cursor = tokenizer->cursor;
char *last = tokenizer->cursor + tokenizer->length - 1;
while (cursor < last) {
if (*cursor++ != '{')
continue;
char c = *cursor++;
if (c != '%' && c != '{')
continue;
if (cursor - tokenizer->cursor > 2) {
token->type = TOKEN_STRING;
cursor -= 2;
goto found;
}
char *incomplete_end = cursor;
token->type = TOKEN_INVALID;
if (c == '%') {
while (cursor < last) {
if (*cursor++ != '%')
continue;
c = *cursor++;
while (c == '%' && cursor <= last)
c = *cursor++;
if (c != '}')
continue;
token->type = TOKEN_TAG;
goto found;
}
cursor = incomplete_end;
goto found;
} else {
while (cursor < last) {
if (*cursor++ != '}')
continue;
if (*cursor++ != '}') {
incomplete_end = cursor - 1;
continue;
}
token->type = TOKEN_VARIABLE;
goto found;
}
cursor = incomplete_end;
goto found;
}
}
cursor = last + 1;
found:
token->str = tokenizer->cursor;
token->length = cursor - tokenizer->cursor;
tokenizer->cursor += token->length;
tokenizer->length -= token->length;
}
static VALUE rb_next(VALUE self)
{
struct liquid_tokenizer *tokenizer;
Data_Get_Struct(self, struct liquid_tokenizer, tokenizer);
struct token token;
liquid_tokenizer_next(tokenizer, &token);
if (token.type == TOKEN_NONE)
return Qnil;
return rb_str_new(token.str, token.length);
}
void init_liquid_tokenizer()
{
cLiquidTokenizer = rb_define_class_under(mLiquid, "Tokenizer", rb_cObject);
rb_define_alloc_func(cLiquidTokenizer, rb_allocate);
rb_define_method(cLiquidTokenizer, "initialize", rb_initialize, 1);
rb_define_method(cLiquidTokenizer, "next", rb_next, 0);
rb_define_alias(cLiquidTokenizer, "shift", "next");
}

30
ext/liquid/tokenizer.h Normal file

@@ -0,0 +1,30 @@
#ifndef LIQUID_TOKENIZER_H
#define LIQUID_TOKENIZER_H
extern VALUE cLiquidTokenizer;
enum token_type {
TOKEN_NONE,
TOKEN_INVALID,
TOKEN_STRING,
TOKEN_TAG,
TOKEN_VARIABLE
};
struct token {
enum token_type type;
char *str;
int length;
};
struct liquid_tokenizer {
char *cursor;
int length;
};
void init_liquid_tokenizer();
void liquid_tokenizer_next(struct liquid_tokenizer *tokenizer, struct token *token);
#define LIQUID_TOKENIZER_GET_STRUCT(obj) ((struct liquid_tokenizer *)obj_get_data_ptr(obj, cLiquidTokenizer))
#endif

21
ext/liquid/utils.c Normal file

@@ -0,0 +1,21 @@
#include <ruby.h>
void raise_type_error(VALUE expected, VALUE got)
{
rb_raise(rb_eTypeError, "wrong argument type %s (expected %s)",
rb_class2name(got), rb_class2name(expected));
}
void check_class(VALUE obj, int type, VALUE klass)
{
Check_Type(obj, type);
VALUE obj_klass = RBASIC_CLASS(obj);
if (obj_klass != klass)
raise_type_error(klass, obj_klass);
}
void *obj_get_data_ptr(VALUE obj, VALUE klass)
{
check_class(obj, T_DATA, klass);
return DATA_PTR(obj);
}

8
ext/liquid/utils.h Normal file

@@ -0,0 +1,8 @@
#ifndef LIQUID_UTILS_H
#define LIQUID_UTILS_H
void raise_type_error(VALUE expected, VALUE got);
void check_class(VALUE obj, int type, VALUE klass);
void *obj_get_data_ptr(VALUE obj, VALUE klass);
#endif

179
ext/liquid/variable.c Normal file

@@ -0,0 +1,179 @@
#include "liquid_ext.h"
VALUE cLiquidVariable;
extern VALUE mLiquid;
static void free_variable(void *ptr)
{
struct liquid_variable *variable = ptr;
xfree(variable);
}
static VALUE rb_variable_allocate(VALUE klass)
{
VALUE obj;
struct liquid_variable *variable;
obj = Data_Make_Struct(klass, struct liquid_variable, NULL, free_variable, variable);
return obj;
}
static inline int skip_whitespace(char * str, int len)
{
int skipped = 0; char * ptr = str;
while (skipped < len && isspace(*ptr))
{skipped++; ptr++;}
return skipped;
}
static char * get_quoted_fragment(char * str, int len, int * ret_size, int * end_offset, bool colon)
{
int p = 0; /* Current position in string */
int start = -1, end = -1; /* Start and end indices for the found string */
int quoted_by = -1; /* Is the current part of string quoted by a single or double quote? If so
ignore any special chars */
while (p < len) {
switch (str[p]) {
case '"':
if (start == -1) {start = p; quoted_by = '"';}
else if (str[start] == '"') {end = p; goto quoted_fragment_found;}
else if (quoted_by == -1) quoted_by = '"';
else if (quoted_by == '"') quoted_by = -1;
break;
case '\'':
if (start == -1) {start = p; quoted_by = '\'';}
else if (str[start] == '\'') {end = p; goto quoted_fragment_found;}
else if (quoted_by == -1) quoted_by = '\'';
else if (quoted_by == '\'') quoted_by = -1;
break;
case ':':
if (colon) {
if (start != -1 && quoted_by == -1) {end = p-1; goto quoted_fragment_found;}
} else {
if (start == -1) start = p;
}
break;
case '|':
case ',':
case '\n':
case '\r':
case '\f':
case '\t':
case '\v':
case ' ':
if (start != -1 && quoted_by == -1) {end = p-1; goto quoted_fragment_found;}
break;
default:
if (start == -1) start = p;
break;
}
p++;
}
if (p == len && start != -1 && end == -1) end = len-1;
quoted_fragment_found:
if (end >= start) {
*ret_size = end-start+1;
*end_offset = end+1;
return &str[start];
} else {
*ret_size = 0;
return NULL;
}
}
static VALUE get_filters(char * str, int len, VALUE self) {
VALUE filters_arr = rb_ary_new();
int p = 0;
int ret_size, end_offset;
char * f;
while(p<len) {
if (str[p] == '|') {
VALUE filter = rb_ary_new();
VALUE f_args = rb_ary_new();
p += skip_whitespace(&str[p+1], len-p-1);
f = get_quoted_fragment(&str[p], len-p, &ret_size, &end_offset, true);
p += end_offset;
if (f) {
if (f[ret_size-1] == ':') ret_size--;
rb_ary_push(filter, rb_str_new(f, ret_size));
}
/* Check for filter arguments */
do {
if (p<len) {
p += skip_whitespace(&str[p], len-p);
// printf("\n1. %.*s\n", len-p, &str[p]);
if (str[p] != '|') {
f = get_quoted_fragment(&str[p], len-p, &ret_size, &end_offset, false);
// printf("\n2. %.*s\n", ret_size, f);
p += end_offset;
p += skip_whitespace(&str[p], len-p);
if (str[p] == '|') p--;
if (f) rb_ary_push(f_args, rb_str_new(f, ret_size));
} else p--;
}
} while (str[p] == ',' || str[p] == ':');
rb_ary_push(filter, f_args);
/* Add to filters_arr array */
rb_ary_push(filters_arr, filter);
}
p++;
}
return filters_arr;
}
static VALUE rb_variable_lax_parse(VALUE self, VALUE m)
{
char * markup = RSTRING_PTR(m);
int markup_len = RSTRING_LEN(m);
char * cursor = markup; int cursor_pos = 0;
VALUE filters_arr = rb_ary_new(); /* default so @filters and the return value are always set */
int size, end_offset;
/* Extract name */
cursor_pos += skip_whitespace(markup, markup_len);
cursor = markup + cursor_pos;
cursor = get_quoted_fragment(cursor, markup_len - cursor_pos, &size, &end_offset, false);
if (cursor == NULL) {
rb_iv_set(self, "@name", Qnil);
filters_arr = rb_ary_new();
rb_iv_set(self, "@filters", filters_arr);
}
else
{
rb_iv_set(self, "@name", rb_str_new(cursor, size));
/* Extract filters; end_offset is relative to the whitespace-skipped cursor */
if (cursor_pos + end_offset < markup_len) {
cursor = &markup[cursor_pos + end_offset];
filters_arr = get_filters(cursor, markup_len - cursor_pos - end_offset, self);
}
rb_iv_set(self, "@filters", filters_arr);
}
return filters_arr;
}
void init_liquid_variable()
{
cLiquidVariable = rb_define_class_under(mLiquid, "Variable", rb_cObject);
rb_define_alloc_func(cLiquidVariable, rb_variable_allocate);
rb_define_method(cLiquidVariable, "lax_parse", rb_variable_lax_parse, 1);
}

13
ext/liquid/variable.h Normal file

@@ -0,0 +1,13 @@
#ifndef LIQUID_VARIABLE_H
#define LIQUID_VARIABLE_H
#include <regex.h>
struct liquid_variable {
char *markup; long markup_len;
char *name; long name_len;
};
void init_liquid_variable();
#endif

View File

@@ -30,21 +30,18 @@ module Liquid
VariableSegment = /[\w\-]/
VariableStart = /\{\{/
VariableEnd = /\}\}/
VariableIncompleteEnd = /\}\}?/
QuotedString = /"[^"]*"|'[^']*'/
QuotedFragment = /#{QuotedString}|(?:[^\s,\|'"]|#{QuotedString})+/o
StrictQuotedFragment = /"[^"]+"|'[^']+'|[^\s|:,]+/
FirstFilterArgument = /#{FilterArgumentSeparator}(?:#{StrictQuotedFragment})/o
OtherFilterArgument = /#{ArgumentSeparator}(?:#{StrictQuotedFragment})/o
SpacelessFilter = /^(?:'[^']+'|"[^"]+"|[^'"])*#{FilterSeparator}(?:#{StrictQuotedFragment})(?:#{FirstFilterArgument}(?:#{OtherFilterArgument})*)?/o
SpacelessFilter = /\A(?:'[^']+'|"[^"]+"|[^'"])*#{FilterSeparator}(?:#{StrictQuotedFragment})(?:#{FirstFilterArgument}(?:#{OtherFilterArgument})*)?/o
Expression = /(?:#{QuotedFragment}(?:#{SpacelessFilter})*)/o
TagAttributes = /(\w+)\s*\:\s*(#{QuotedFragment})/o
AnyStartingTag = /\{\{|\{\%/
PartialTemplateParser = /#{TagStart}.*?#{TagEnd}|#{VariableStart}.*?#{VariableIncompleteEnd}/o
TemplateParser = /(#{PartialTemplateParser}|#{AnyStartingTag})/o
VariableParser = /\[[^\]]+\]|#{VariableSegment}+\??/o
end
require 'liquid/liquid'
require "liquid/version"
require 'liquid/lexer'
require 'liquid/parser'

View File

@@ -1,82 +1,26 @@
module Liquid
class Block < Tag
IsTag = /^#{TagStart}/o
IsVariable = /^#{VariableStart}/o
FullToken = /^#{TagStart}\s*(\w+)\s*(.*)?#{TagEnd}$/o
ContentOfVariable = /^#{VariableStart}(.*)#{VariableEnd}$/o
def initialize(tag_name, markup, tokens)
super
parse_body(tokens)
end
def blank?
@blank || false
end
def parse(tokens)
@blank = true
@nodelist ||= []
@nodelist.clear
# All child tags of the current block.
@children = []
while token = tokens.shift
case token
when IsTag
if token =~ FullToken
# if we found the proper block delimiter just end parsing here and let the outer block
# proceed
if block_delimiter == $1
end_tag
return
end
# fetch the tag from registered blocks
if tag = Template.tags[$1]
new_tag = tag.new_with_options($1, $2, tokens, @options || {})
@blank &&= new_tag.blank?
@nodelist << new_tag
@children << new_tag
else
# this tag is not registered with the system
# pass it to the current block for special handling or error reporting
unknown_tag($1, $2, tokens)
end
else
raise SyntaxError.new(options[:locale].t("errors.syntax.tag_termination", :token => token, :tag_end => TagEnd.inspect))
end
when IsVariable
new_var = create_variable(token)
@nodelist << new_var
@children << new_var
@blank = false
when ''
# pass
else
@nodelist << token
@blank &&= (token =~ /\A\s*\z/)
end
end
# Make sure that it's ok to end parsing in the current block.
# Effectively this method will throw an exception unless the current block is
# of type Document
assert_missing_delimitation!
end
# warnings of this block and all sub-tags
def warnings
all_warnings = []
all_warnings.concat(@warnings) if @warnings
(@children || []).each do |node|
all_warnings.concat(node.warnings || [])
(nodelist || []).each do |node|
all_warnings.concat(node.warnings || []) if node.respond_to?(:warnings)
end
all_warnings
end
def end_tag
end
def unknown_tag(tag, params, tokens)
case tag
when 'else'
@@ -99,19 +43,20 @@ module Liquid
@tag_name
end
def create_variable(token)
token.scan(ContentOfVariable) do |content|
return Variable.new(content.first, @options)
end
raise SyntaxError.new(options[:locale].t("errors.syntax.variable_termination", :token => token, :tag_end => VariableEnd.inspect))
end
def render(context)
render_all(@nodelist, context)
end
protected
def unterminated_variable(token)
raise SyntaxError.new(options[:locale].t("errors.syntax.variable_termination", :token => token, :tag_end => VariableEnd.inspect))
end
def unterminated_tag(token)
raise SyntaxError.new(options[:locale].t("errors.syntax.tag_termination", :token => token, :tag_end => TagEnd.inspect))
end
def assert_missing_delimitation!
raise SyntaxError.new(options[:locale].t("errors.syntax.tag_never_closed", :block_name => block_name))
end

View File

@@ -171,15 +171,15 @@ module Liquid
LITERALS[key]
else
case key
when /^'(.*)'$/ # Single quoted strings
when /\A'(.*)'\z/ # Single quoted strings
$1
when /^"(.*)"$/ # Double quoted strings
when /\A"(.*)"\z/ # Double quoted strings
$1
when /^(-?\d+)$/ # Integer and floats
when /\A(-?\d+)\z/ # Integer and floats
$1.to_i
when /^\((\S+)\.\.(\S+)\)$/ # Ranges
when /\A\((\S+)\.\.(\S+)\)\z/ # Ranges
(resolve($1).to_i..resolve($2).to_i)
when /^(-?\d[\d\.]+)$/ # Floats
when /\A(-?\d[\d\.]+)\z/ # Floats
$1.to_f
else
variable(key)
@@ -218,7 +218,7 @@ module Liquid
# assert_equal 'tobi', @context['hash["name"]']
def variable(markup)
parts = markup.scan(VariableParser)
square_bracketed = /^\[(.*)\]$/
square_bracketed = /\A\[(.*)\]\z/
first_part = parts.shift

View File

@@ -3,12 +3,12 @@ module Liquid
# we don't need markup to open this block
def initialize(tokens, options = {})
@options = options
parse(tokens)
parse_body(tokens)
end
# There isn't a real delimiter
def block_delimiter
[]
nil
end
# Document blocks don't need to be terminated since they are not actually opened

View File

@@ -57,7 +57,7 @@ module Liquid
end
def full_path(template_path)
raise FileSystemError, "Illegal template name '#{template_path}'" unless template_path =~ /^[^.\/][a-zA-Z0-9_\/]+$/
raise FileSystemError, "Illegal template name '#{template_path}'" unless template_path =~ /\A[^.\/][a-zA-Z0-9_\/]+\z/
full_path = if template_path.include?('/')
File.join(root, File.dirname(template_path), @pattern % File.basename(template_path))
@@ -65,7 +65,7 @@ module Liquid
File.join(root, @pattern % template_path)
end
raise FileSystemError, "Illegal template path '#{File.expand_path(full_path)}'" unless File.expand_path(full_path) =~ /^#{File.expand_path(root)}/
raise FileSystemError, "Illegal template path '#{File.expand_path(full_path)}'" unless File.expand_path(full_path) =~ /\A#{File.expand_path(root)}/
full_path
end

View File

@@ -55,7 +55,7 @@ module Liquid
col += 1
result << "<td class=\"col#{col}\">" << render_all(@nodelist, context) << '</td>'
result << "<td class=\"col#{col}\">" << super << '</td>'
if col == cols and (index != length - 1)
col = 0

View File

@@ -190,7 +190,7 @@ module Liquid
return input.to_s
end
if ((input.is_a?(String) && !/^\d+$/.match(input.to_s).nil?) || input.is_a?(Integer)) && input.to_i > 0
if ((input.is_a?(String) && !/\A\d+\z/.match(input.to_s).nil?) || input.is_a?(Integer)) && input.to_i > 0
input = Time.at(input.to_i)
end
@@ -281,7 +281,7 @@ module Liquid
when Numeric
obj
when String
(obj.strip =~ /^\d+\.\d+$/) ? BigDecimal.new(obj) : obj.to_i
(obj.strip =~ /\A\d+\.\d+\z/) ? BigDecimal.new(obj) : obj.to_i
else
0
end

View File

@@ -16,10 +16,6 @@ module Liquid
@tag_name = tag_name
@markup = markup
@options ||= {} # needs || because might be set before initialize
parse(tokens)
end
def parse(tokens)
end
def name

View File

@@ -12,8 +12,8 @@ module Liquid
# <div class="green"> Item five</div>
#
class Cycle < Tag
SimpleSyntax = /^#{QuotedFragment}+/o
NamedSyntax = /^(#{QuotedFragment})\s*\:\s*(.*)/o
SimpleSyntax = /\A#{QuotedFragment}+/o
NamedSyntax = /\A(#{QuotedFragment})\s*\:\s*(.*)/o
def initialize(tag_name, markup, tokens)
case markup

View File

@@ -4,7 +4,7 @@ module Liquid
def render(context)
context.stack do
output = render_all(@nodelist, context)
output = super
if output != context.registers[:ifchanged]
context.registers[:ifchanged] = output

View File

@@ -35,9 +35,6 @@ module Liquid
super
end
def parse(tokens)
end
def blank?
false
end

View File

@@ -1,17 +1,14 @@
module Liquid
class Raw < Block
FullTokenPossiblyInvalid = /^(.*)#{TagStart}\s*(\w+)\s*(.*)?#{TagEnd}$/o
FullTokenPossiblyInvalid = /\A(.*)#{TagStart}\s*(\w+)\s*(.*)?#{TagEnd}\z/o
def parse(tokens)
def parse_body(tokens)
@nodelist ||= []
@nodelist.clear
while token = tokens.shift
if token =~ FullTokenPossiblyInvalid
@nodelist << $1 if $1 != ""
if block_delimiter == $2
end_tag
return
end
return if block_delimiter == $2
end
@nodelist << token if not token.empty?
end

View File

@@ -162,16 +162,9 @@ module Liquid
private
# Uses the <tt>Liquid::TemplateParser</tt> regexp to tokenize the passed source
def tokenize(source)
source = source.source if source.respond_to?(:source)
return [] if source.to_s.empty?
tokens = source.split(TemplateParser)
# removes the rogue empty element at the beginning of the array
tokens.shift if tokens[0] and tokens[0].empty?
tokens
Tokenizer.new(source.to_s)
end
end

View File

@@ -11,8 +11,7 @@ module Liquid
# {{ user | link }}
#
class Variable
FilterParser = /(?:#{FilterSeparator}|(?:\s*(?:#{QuotedFragment}|#{ArgumentSeparator})\s*)+)/o
EasyParse = /^ *(\w+(?:\.\w+)*) *$/
EasyParse = /\A *(\w+(?:\.\w+)*) *\z/
attr_accessor :filters, :name, :warnings
def initialize(markup, options = {})
@@ -35,22 +34,22 @@ module Liquid
end
end
def lax_parse(markup)
@filters = []
if match = markup.match(/\s*(#{QuotedFragment})(.*)/o)
@name = match[1]
if match[2].match(/#{FilterSeparator}\s*(.*)/o)
filters = Regexp.last_match(1).scan(FilterParser)
filters.each do |f|
if matches = f.match(/\s*(\w+)/)
filtername = matches[1]
filterargs = f.scan(/(?:#{FilterArgumentSeparator}|#{ArgumentSeparator})\s*((?:\w+\s*\:\s*)?#{QuotedFragment})/o).flatten
@filters << [filtername, filterargs]
end
end
end
end
end
# def lax_parse(markup)
# @filters = []
# if match = markup.match(/\s*(#{QuotedFragment})(.*)/o)
# @name = match[1]
# if match[2].match(/#{FilterSeparator}\s*(.*)/o)
# filters = Regexp.last_match(1).scan(FilterParser)
# filters.each do |f|
# if matches = f.match(/\s*(\w+)/)
# filtername = matches[1]
# filterargs = f.scan(/(?:#{FilterArgumentSeparator}|#{ArgumentSeparator})\s*((?:\w+\s*\:\s*)?#{QuotedFragment})/o).flatten
# @filters << [filtername, filterargs]
# end
# end
# end
# end
# end
def strict_parse(markup)
# Very simple valid cases

View File

@@ -18,9 +18,15 @@ Gem::Specification.new do |s|
s.required_rubygems_version = ">= 1.3.7"
s.test_files = Dir.glob("{test}/**/*")
s.files = Dir.glob("{lib}/**/*") + %w(MIT-LICENSE README.md)
s.files = Dir.glob("{lib,ext}/**/*") + %w(MIT-LICENSE README.md)
s.extensions = ['ext/liquid/extconf.rb']
s.extra_rdoc_files = ["History.md", "README.md"]
s.require_path = "lib"
s.add_development_dependency 'rake-compiler'
s.add_development_dependency 'stackprof'
s.add_development_dependency 'rake'
s.add_development_dependency 'activesupport'
end

View File

@@ -54,7 +54,7 @@ module ShopFilter
def product_img_url(url, style = 'small')
unless url =~ /^products\/([\w\-\_]+)\.(\w{2,4})/
unless url =~ /\Aproducts\/([\w\-\_]+)\.(\w{2,4})/
raise ArgumentError, 'filter "size" can only be called on product images'
end

15
performance/stackprof.rb Normal file

@@ -0,0 +1,15 @@
begin
require 'stackprof'
rescue LoadError
fail("install stackprof extension/gem")
end
require File.dirname(__FILE__) + '/theme_runner'
profiler = ThemeRunner.new
profiler.run
results = StackProf.run(mode: :cpu, out: ENV['FILENAME']) do
100.times do
profiler.run
end
end
if results.kind_of?(File)
puts "wrote stackprof dump to #{results.path}"
else
StackProf::Report.new(results).print_text(false, 20)
end

View File

@@ -25,26 +25,6 @@ end
class TemplateTest < Test::Unit::TestCase
include Liquid
def test_tokenize_strings
assert_equal [' '], Template.new.send(:tokenize, ' ')
assert_equal ['hello world'], Template.new.send(:tokenize, 'hello world')
end
def test_tokenize_variables
assert_equal ['{{funk}}'], Template.new.send(:tokenize, '{{funk}}')
assert_equal [' ', '{{funk}}', ' '], Template.new.send(:tokenize, ' {{funk}} ')
assert_equal [' ', '{{funk}}', ' ', '{{so}}', ' ', '{{brother}}', ' '], Template.new.send(:tokenize, ' {{funk}} {{so}} {{brother}} ')
assert_equal [' ', '{{ funk }}', ' '], Template.new.send(:tokenize, ' {{ funk }} ')
end
def test_tokenize_blocks
assert_equal ['{%comment%}'], Template.new.send(:tokenize, '{%comment%}')
assert_equal [' ', '{%comment%}', ' '], Template.new.send(:tokenize, ' {%comment%} ')
assert_equal [' ', '{%comment%}', ' ', '{%endcomment%}', ' '], Template.new.send(:tokenize, ' {%comment%} {%endcomment%} ')
assert_equal [' ', '{% comment %}', ' ', '{% endcomment %}', ' '], Template.new.send(:tokenize, " {% comment %} {% endcomment %} ")
end
def test_instance_assigns_persist_on_same_template_object_between_parses
t = Template.new
assert_equal 'from instance assigns', t.parse("{% assign foo = 'from instance assigns' %}{{ foo }}").render

View File

@@ -0,0 +1,64 @@
require 'test_helper'
class TokenizerTest < Test::Unit::TestCase
def test_tokenize_strings
assert_equal [' '], tokenize(' ')
assert_equal ['hello world'], tokenize('hello world')
end
def test_tokenize_variables
assert_equal ['{{funk}}'], tokenize('{{funk}}')
assert_equal [' ', '{{funk}}', ' '], tokenize(' {{funk}} ')
assert_equal [' ', '{{funk}}', ' ', '{{so}}', ' ', '{{brother}}', ' '], tokenize(' {{funk}} {{so}} {{brother}} ')
assert_equal [' ', '{{ funk }}', ' '], tokenize(' {{ funk }} ')
end
def test_tokenize_blocks
assert_equal ['{%comment%}'], tokenize('{%comment%}')
assert_equal [' ', '{%comment%}', ' '], tokenize(' {%comment%} ')
assert_equal [' ', '{%comment%}', ' ', '{%endcomment%}', ' '], tokenize(' {%comment%} {%endcomment%} ')
assert_equal [' ', '{% comment %}', ' ', '{% endcomment %}', ' '], tokenize(" {% comment %} {% endcomment %} ")
end
def test_tokenize_incomplete_end
assert_tokens 'before{{ incomplete }after', ['before', '{{ incomplete }', 'after']
assert_tokens 'before{% incomplete %after', ['before', '{%', ' incomplete %after']
end
def test_tokenize_no_end
assert_tokens 'before{{ unterminated ', ['before', '{{', ' unterminated ']
assert_tokens 'before{% unterminated ', ['before', '{%', ' unterminated ']
end
private
def assert_tokens(source, expected)
assert_equal expected, tokenize(source)
assert_equal expected, old_tokenize(source)
end
def tokenize(source)
tokenizer = Liquid::Tokenizer.new(source)
tokens = []
while token = tokenizer.next
tokens << token
end
tokens
end
AnyStartingTag = /\{\{|\{\%/
VariableIncompleteEnd = /\}\}?/
PartialTemplateParser = /#{Liquid::TagStart}.*?#{Liquid::TagEnd}|#{Liquid::VariableStart}.*?#{VariableIncompleteEnd}/o
TemplateParser = /(#{PartialTemplateParser}|#{AnyStartingTag})/o
def old_tokenize(source)
return [] if source.to_s.empty?
tokens = source.split(TemplateParser)
# removes the rogue empty element at the beginning of the array
tokens.shift if tokens[0] and tokens[0].empty?
tokens
end
end

View File

@@ -51,11 +51,9 @@ class VariableTest < Test::Unit::TestCase
end
def test_filter_with_date_parameter
var = Variable.new(%! '2006-06-06' | date: "%m/%d/%Y"!)
assert_equal "'2006-06-06'", var.name
assert_equal [["date",["\"%m/%d/%Y\""]]], var.filters
end
def test_filters_without_whitespace
@@ -73,7 +71,7 @@ class VariableTest < Test::Unit::TestCase
end
def test_symbol
var = Variable.new("http://disney.com/logo.gif | image: 'med' ", :error_mode => :lax)
var = Variable.new("http://disney.com/logo.gif | image: 'med'", :error_mode => :lax)
assert_equal "http://disney.com/logo.gif", var.name
assert_equal [["image",["'med'"]]], var.filters
end