/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* NOTE : please see documentation at bottom of this file. (It was placed there because it's
* tiring to always have to page past it... :)
*/
options
{
/** The default package for this parser kit */
NODE_PACKAGE="org.apache.velocity.runtime.parser";
/** A source file will be generated for each non-terminal */
MULTI=true;
/**
* Each node will have access to the parser, I did this so
* some global information can be shared via the parser. I
* think this will come in handy for keeping track of
* context, and being able to push changes back into
* the context when nodes make modifications to the
* context by setting properties, variables and
* what not.
*/
NODE_USES_PARSER=true;
/**
* The parser must be non-static in order for the
* above option to work, otherwise the parser value
* is passed in as null, which isn't all that useful ;)
*/
STATIC=false;
/**
* Enables the use of a visitor that each of the nodes
* will accept. This way we can separate the logic
* of node processing into a visitor and out of the
* nodes themselves. If processing changes then
* nothing has to change in the node code.
*/
VISITOR=true;
/**
* Declare that we are accepting unicode input and
* that we are using a custom character stream class
* Note that the char stream class is really a slightly
* modified ASCII_CharStream, as it appears we are safe
* because we only deal with pre-encoding-converted
* Readers rather than raw input streams.
*/
UNICODE_INPUT=true;
USER_CHAR_STREAM=true;
/**
* for debugging purposes. Keep false
*/
DEBUG_PARSER=false;
DEBUG_TOKEN_MANAGER=false;
}
PARSER_BEGIN(Parser)
package org.apache.velocity.runtime.parser;
import java.io.*;
import java.util.*;
import org.apache.velocity.exception.VelocityException;
import org.apache.velocity.runtime.RuntimeServices;
import org.apache.velocity.runtime.parser.node.*;
import org.apache.velocity.runtime.directive.Directive;
import org.apache.velocity.runtime.directive.Macro;
import org.apache.velocity.runtime.directive.MacroParseException;
import org.apache.velocity.util.StringUtils;
import org.apache.commons.lang.text.StrBuilder;
import org.apache.velocity.runtime.RuntimeConstants;
/**
* This class is responsible for parsing a Velocity
* template. This class was generated by JavaCC using
* the JJTree extension to produce an Abstract
* Syntax Tree (AST) of the template.
*
* Please look at the Parser.jjt file which is
* what controls the generation of this class.
*
* @author Jason van Zyl
* @author Geir Magnusson Jr.
* @author Henning P. Schmiedehausen
* @version $Id$
*/
public class Parser
{
/**
* Keep track of defined macros, used for escape processing
*/
private Map macroNames = new HashMap();
/**
* Name of current template we are parsing. Passed to us in parse()
*/
public String currentTemplateName = "";
/**
* Set to true if the property
* RuntimeConstants.RUNTIME_REFERENCES_STRICT_ESCAPE is set to true
*/
public boolean strictEscape = false;
VelocityCharStream velcharstream = null;
private RuntimeServices rsvc = null;
/**
* This constructor was added to allow the re-use of parsers.
* The normal constructor takes a single argument, which
* is an InputStream. This simply creates a re-usable parser
* object; we satisfy the requirement of an InputStream
* by using a newline character as the input stream.
*/
public Parser( RuntimeServices rs)
{
/*
* need to call the CTOR first thing.
*/
this( new VelocityCharStream(
new ByteArrayInputStream("\n".getBytes()), 1, 1 ));
/*
* now setup a VCS for later use
*/
velcharstream = new VelocityCharStream(
new ByteArrayInputStream("\n".getBytes()), 1, 1 );
strictEscape =
rs.getBoolean(RuntimeConstants.RUNTIME_REFERENCES_STRICT_ESCAPE, false);
/*
* and save the RuntimeServices
*/
rsvc = rs;
}
/**
* This was also added to allow parsers to be
* re-usable. Normal JavaCC use entails passing an
* input stream to the constructor and the parsing
* process is carried out once. We want to be able
* to re-use parsers: we do this by adding this
* method and re-initializing the lexer with
* the new stream that we want parsed.
*/
public SimpleNode parse( Reader reader, String templateName )
throws ParseException
{
SimpleNode sn = null;
currentTemplateName = templateName;
try
{
token_source.clearStateVars();
/*
* reinitialize the VelocityCharStream
* with the new reader
*/
velcharstream.ReInit( reader, 1, 1 );
/*
* now reinit the Parser with this CharStream
*/
ReInit( velcharstream );
/*
* do that voodoo...
*/
sn = process();
}
catch (MacroParseException mee)
{
/*
* thrown by the Macro class when something is amiss in the
* Macro specification
*/
rsvc.getLog().error("Parser Error: " + templateName, mee);
throw mee;
}
catch (ParseException pe)
{
rsvc.getLog().error("Parser Exception: " + templateName, pe);
throw new TemplateParseException (pe.currentToken,
pe.expectedTokenSequences, pe.tokenImage, currentTemplateName);
}
catch (TokenMgrError tme)
{
throw new ParseException("Lexical error: " + tme.toString());
}
catch (Exception e)
{
String msg = "Parser Error: " + templateName;
rsvc.getLog().error(msg, e);
throw new VelocityException(msg, e);
}
currentTemplateName = "";
return sn;
}
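    /**
     * A minimal usage sketch (illustration only, not generated code) of how a
     * re-usable parser built with the RuntimeServices constructor above is driven.
     * The template names and the RuntimeServices instance `rs` are assumed to be
     * supplied by the caller.
     *
     *   Parser parser = new Parser(rs);
     *   SimpleNode root = parser.parse(new StringReader("Hello $name!"), "greeting.vm");
     *   // ... traverse / render the AST ...
     *   root = parser.parse(new StringReader("#set($x = 1)$x"), "other.vm"); // re-use
     */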
/**
* This method gets a Directive from the directives Hashtable
*/
public Directive getDirective(String directive)
{
return (Directive) rsvc.getDirective(directive);
}
/**
* This method finds out if the directive exists in the directives Map.
*/
public boolean isDirective(String directive)
{
return rsvc.getDirective(directive) != null;
}
/**
* Produces a processed output for an escaped control or
* pluggable directive
*/
private String escapedDirective( String strImage )
{
int iLast = strImage.lastIndexOf("\\");
String strDirective = strImage.substring(iLast + 1);
boolean bRecognizedDirective = false;
// we don't have to call substring method all the time in this method
String dirTag = strDirective.substring(1);
if (dirTag.charAt(0) == '{')
{
dirTag = dirTag.substring(1, dirTag.length() - 1);
}
/*
* If this is a predefined directive or if we detect
* a macro definition (this is approximate at best) then
* we absorb the backslash. If in strict reference
* mode then we always absorb the backslash regardless
* of whether the directive is defined or not.
*/
if (strictEscape
|| isDirective(dirTag)
|| macroNames.containsKey(dirTag)
|| rsvc.isVelocimacro(dirTag, currentTemplateName))
{
bRecognizedDirective = true;
}
else
{
/* order for speed? */
if ( dirTag.equals("if")
|| dirTag.equals("end")
|| dirTag.equals("set")
|| dirTag.equals("else")
|| dirTag.equals("elseif")
)
{
bRecognizedDirective = true;
}
}
/*
* if so, make the proper prefix string (let the escapes do their thing..)
* otherwise, just return what it is..
*/
if (bRecognizedDirective)
return ( strImage.substring(0,iLast/2) + strDirective);
else
return ( strImage );
}
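    /**
     * Illustration only (not generated code): given the processing above, with
     * strictEscape off and "#if" recognized as a directive, an escaped image like
     * "\#if" comes back as "#if" (the backslash is absorbed), while an unrecognized
     * tag such as "\#nosuchdirective" is returned unchanged. The tag names here are
     * hypothetical examples.
     */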
/**
* Check whether there is a left parenthesis, with optional leading
* whitespace. This method is used in the semantic lookahead of the
* Directive production. This is done in code instead of as a production
* for simplicity and efficiency.
*/
private boolean isLeftParenthesis()
{
char c;
int no = 0;
try {
while(true)
{
/**
* Read a character
*/
c = velcharstream.readChar();
no++;
if (c == '(')
{
return true;
}
/**
* if not a white space return
*/
else if (c != ' ' && c != '\n' && c != '\r' && c != '\t')
{
return false;
}
}
}
catch (IOException e)
{
    /*
     * ignore: end of input simply means there is no left parenthesis
     */
}
finally
{
/**
* Backup the stream to the initial state
*/
velcharstream.backup(no);
}
return false;
}
}
PARSER_END(Parser)
TOKEN_MGR_DECLS:
{
private int fileDepth = 0;
private int lparen = 0;
private int rparen = 0;
List stateStack = new ArrayList(50);
public boolean debugPrint = false;
private boolean inReference;
public boolean inDirective;
private boolean inComment;
public boolean inSet;
/**
* pops the top state off the 'state stack',
* and restores the paren counts
* public because we need it in PD & VM handling
*
* @return boolean : success. It can fail if the state machine
* gets messed up (so don't mess it up :)
*/
public boolean stateStackPop()
{
ParserState s;
try
{
s = (ParserState) stateStack.remove(stateStack.size() - 1); // stack.pop
}
catch(IndexOutOfBoundsException e)
{
// empty stack
lparen=0;
SwitchTo(DEFAULT);
return false;
}
if( debugPrint )
System.out.println(
" stack pop (" + stateStack.size() + ") : lparen=" +
s.lparen +
" newstate=" + s.lexstate );
lparen = s.lparen;
rparen = s.rparen;
SwitchTo(s.lexstate);
return true;
}
/**
* pushes the current state onto the 'state stack', and saves the paren counts
*
* @return boolean : success of operation
*/
public boolean stateStackPush()
{
if( debugPrint )
System.out.println(" (" + stateStack.size() + ") pushing cur state : " +
curLexState );
ParserState s = new ParserState();
s.lparen = lparen;
s.rparen = rparen;
s.lexstate = curLexState;
lparen = 0;
stateStack.add(s); // stack.push
return true;
}
/**
* Clears all state variables, resets to
* start values, clears stateStack. Call
* before parsing.
*/
public void clearStateVars()
{
stateStack.clear();
lparen = 0;
rparen = 0;
inReference = false;
inDirective = false;
inComment = false;
inSet = false;
return;
}
/**
* Holds the state of the parsing process.
*/
private static class ParserState
{
int lparen;
int rparen;
int lexstate;
}
/**
* handles the dropdown logic when encountering a RPAREN
*/
private void RPARENHandler()
{
/*
* Ultimately, we want to drop down to the state below
* the one that has an open (if we hit bottom (DEFAULT),
* that's fine. It's just text schmoo.
*/
boolean closed = false;
if (inComment)
closed = true;
while( !closed )
{
/*
* look at the current state. If we haven't seen an lparen
* in this state then we drop a state, because this
* rparen must be closing an enclosing state
*/
if( lparen > 0)
{
/*
* if rparen + 1 == lparen, then this state is closed.
* Otherwise, increment and keep parsing
*/
if( lparen == rparen + 1)
{
stateStackPop();
}
else
{
rparen++;
}
closed = true;
}
else
{
/*
* now, drop a state
*/
if(!stateStackPop())
break;
}
}
}
}
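/*
 * A walk-through sketch (illustration only) of how the state stack above behaves
 * for a chained reference such as "$foo.bar( $baz )" -- see the REFERENCE
 * lexical-state notes further down for the full picture:
 *
 *   '$'          push the current state, switch to REFERENCE
 *   '.' + alpha  switch to REFMODIFIER           (a switch, not a push)
 *   '('          lparen++, switch to REFMOD2     (a switch, not a push)
 *   '$' (inner)  push REFMOD2, switch to REFERENCE for $baz
 *   terminator   pop back to REFMOD2
 *   ')'          switch back to REFERENCE so method calls can keep chaining
 */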
/* ------------------------------------------------------------------------
*
* Tokens
*
* Note : we now have another state, REFMODIFIER. This is sort of a
* type of REFERENCE state, simply used to use the DIRECTIVE token
* set when we are processing a $foo.bar() construct
*
* ------------------------------------------------------------------------- */
TOKEN:
{
{
stateStackPush();
SwitchTo(REFINDEX);
}
}
TOKEN:
{
{
stateStackPop();
}
}
TOKEN:
{
|
|
}
TOKEN:
{
}
TOKEN:
{
}
TOKEN :
{
|
}
TOKEN:
{
{
if (!inComment)
lparen++;
/*
* If in REFERENCE and we have seen the dot, then move
* to REFMOD2 -> Modifier()
*/
if (curLexState == REFMODIFIER )
SwitchTo( REFMOD2 );
}
}
/*
* we never will see a ')' in anything but DIRECTIVE and REFMOD2.
* Each has its own.
*/
TOKEN:
{
/*
* We will eat any whitespace up to and including a newline for directives
*/
{
RPARENHandler();
}
}
TOKEN:
{
/*
* in REFMOD2, we don't want to bind the whitespace and \n like we
* do when closing a directive.
*/
{
/*
* need to simply switch back to REFERENCE, not drop down the stack
* because we can (infinitely) chain, ala
* $foo.bar().blargh().woogie().doogie()
*/
SwitchTo( REFERENCE );
}
}
/*----------------------------------------------
*
* escape "\\" handling for the built-in directives
*
*--------------------------------------------- */
TOKEN:
{
/*
* We have to do this, because we want these to be a Text node, and
* whatever follows to be peer to this text in the tree.
*
* We need to touch the ASTs for these, because we want an even # of \'s
* to render properly in front of the block
*
* This is really simplistic. I actually would prefer to find them in
* grammatical context, but I am neither smart nor rested, a recipe
* for disaster, another long night with Mr. Parser, or both.
*/
)* "\\#" ( | ) >
}
/*
* needed because #set is so wacky in its desired behavior. We want set
* to eat any preceding whitespace so it is invisible in formatting.
* (As it should be.) If this works well, I am going to chuck the whole MORE:
* token abomination.
*
* We added the lexical states REFERENCE, REFMODIFIER, REFMOD2 to
* address JIRA issue VELOCITY-631. With SET_DIRECTIVE only in the
* DEFAULT lexical state the following VTL fails "$a#set($b = 1)"
* because the Reference token uses LOOKAHEAD(2) combined with the
* fact that we explicitly set the lex state to REFERENCE with the $
* token, which means we would never evaluate this token during the
* look ahead. This general issue is discussed here:
*
* http://www.engr.mun.ca/~theo/JavaCC-FAQ/javacc-faq-ie.htm#tth_sEc3.12
*
*/
TOKEN:
{
{
if (! inComment)
{
inDirective = true;
if ( debugPrint )
System.out.print("#set : going to " + DIRECTIVE );
stateStackPush();
inSet = true;
SwitchTo(DIRECTIVE);
}
/*
* need the LPAREN action
*/
if (!inComment)
{
lparen++;
/*
* If in REFERENCE and we have seen the dot, then move
* to REFMOD2 -> Modifier()
*/
if (curLexState == REFMODIFIER )
SwitchTo( REFMOD2 );
}
}
}
<*>
MORE :
{
/*
* Note : DOLLARBANG is a duplicate of DOLLAR. They must be identical.
*/
{
if (! inComment)
{
/*
* if we find ourselves in REFERENCE, we need to pop down
* to end the previous ref
*/
if (curLexState == REFERENCE)
{
inReference = false;
stateStackPop();
}
inReference = true;
if ( debugPrint )
System.out.print( "$ : going to " + REFERENCE );
stateStackPush();
SwitchTo(REFERENCE);
}
}
|
{
if (! inComment)
{
/*
* if we find ourselves in REFERENCE, we need to pop down
* to end the previous ref
*/
if (curLexState == REFERENCE)
{
inReference = false;
stateStackPop();
}
inReference = true;
if ( debugPrint )
System.out.print( "$! : going to " + REFERENCE );
stateStackPush();
SwitchTo(REFERENCE);
}
}
| "#[["
{
if (!inComment)
{
inComment = true;
stateStackPush();
SwitchTo( IN_TEXTBLOCK );
}
}
| <"#**" ~["#"]>
{
if (!inComment)
{
input_stream.backup(1);
inComment = true;
stateStackPush();
SwitchTo( IN_FORMAL_COMMENT);
}
}
| "#*"
{
if (!inComment)
{
inComment=true;
stateStackPush();
SwitchTo( IN_MULTI_LINE_COMMENT );
}
}
|
{
if (! inComment)
{
/*
* We can have the situation where #if($foo)$foo#end.
* We need to transition out of REFERENCE before going to DIRECTIVE.
* I don't really like this, but I can't think of a legal way
* you could be going into DIRECTIVE while in REFERENCE. -gmj
*/
if (curLexState == REFERENCE || curLexState == REFMODIFIER )
{
inReference = false;
stateStackPop();
}
inDirective = true;
if ( debugPrint )
System.out.print("# : going to " + DIRECTIVE );
stateStackPush();
SwitchTo(PRE_DIRECTIVE);
}
}
}
// treat the single line comment case separately
// to avoid ## errors
TOKEN :
{
{
if (!inComment)
{
if (curLexState == REFERENCE)
{
inReference = false;
stateStackPop();
}
inComment = true;
stateStackPush();
SwitchTo(IN_SINGLE_LINE_COMMENT);
}
}
}
TOKEN :
{
|
|
}
/* -----------------------------------------------------------------------
*
* *_COMMENT Lexical tokens
*
*-----------------------------------------------------------------------*/
TOKEN :
{
{
inComment = false;
stateStackPop();
}
}
TOKEN :
{
{
inComment = false;
stateStackPop();
}
}
TOKEN :
{
{
inComment = false;
stateStackPop();
}
}
TOKEN :
{
{
inComment = false;
stateStackPop();
}
}
SKIP :
{
< ~[] >
}
MORE :
{
< ~[] >
}
/* -----------------------------------------------------------------------
*
* DIRECTIVE Lexical State (some of it, anyway)
*
* ---------------------------------------------------------------------- */
TOKEN:
{
}
TOKEN :
{
//
< STRING_LITERAL:
("\""
( (~["\""])
| ("\\"
( ["n","t","b","r","f"]
| ["0"-"7"] ( ["0"-"7"] )?
| ["0"-"3"] ["0"-"7"] ["0"-"7"]
| "u" ["0"-"9", "a"-"f", "A"-"F"] ["0"-"9", "a"-"f", "A"-"F"] ["0"-"9", "a"-"f", "A"-"F"] ["0"-"9", "a"-"f", "A"-"F"]
)
)
| ("\"\"")
| ( "\\" (" ")* "\n")
)*
"\""
)
|
("\'"
( (~["\'"])
| ("''")
| ( "\\" (" ")* "\n")
)*
"\'"
)
>
{
/*
* - if we are in DIRECTIVE and haven't seen ( yet, then also drop out.
* don't forget to account for the beloved yet weird #set
* - finally, if we are in REFMOD2 (remember : $foo.bar( ) then " is ok!
*/
if( curLexState == DIRECTIVE && !inSet && lparen == 0)
stateStackPop();
}
}
TOKEN:
{
|
}
TOKEN :
{
{
if ( debugPrint )
System.out.println(" NEWLINE :");
stateStackPop();
if (inSet)
inSet = false;
if (inDirective)
inDirective = false;
}
}
TOKEN :
{
|
|
|
|
|
|
|
|
| " | "gt" >
| =" | "ge" >
|
|
|
|
}
TOKEN :
{
{
inDirective = false;
stateStackPop();
}
|
{
SwitchTo(DIRECTIVE);
}
|
{
SwitchTo(DIRECTIVE);
}
|
{
inDirective = false;
stateStackPop();
}
}
TOKEN:
{
<#DIGIT: [ "0"-"9" ] >
/*
* treat FLOATING_POINT_LITERAL and INTEGER_LITERAL differently as a range can only handle integers.
*/
/**
* Note -- we also define an integer as ending with a double period,
* in order to avoid 1..3 being defined as floating point (1.) then a period, then an integer
*/
| )+ ("..")? >
{
/*
* Remove the double period if it is there
*/
if (matchedToken.image.endsWith("..")) {
input_stream.backup(2);
matchedToken.image = matchedToken.image.substring(0,matchedToken.image.length()-2);
}
/*
* check to see if we are in set
* ex. #set $foo = $foo + 3
* because we want to handle the \n after
*/
if ( lparen == 0 && !inSet && curLexState != REFMOD2 && curLexState != REFINDEX)
{
stateStackPop();
}
}
| )+ "." ()* ()?
| ("-")? "." ()+ ()?
| ("-")? ()+
>
{
/*
* check to see if we are in set
* ex. #set $foo = $foo + 3
* because we want to handle the \n after
*/
if ( lparen == 0 && !inSet && curLexState != REFMOD2)
{
stateStackPop();
}
}
|
<#EXPONENT: ["e","E"] (["+","-"])? (["0"-"9"])+ >
}
TOKEN:
{
<#LETTER: [ "a"-"z", "A" - "Z" ] >
| <#DIRECTIVE_CHAR: [ "a"-"z", "A"-"Z", "0"-"9", "_", "@" ] >
| | ["_"] | ["@"]) ()* >
| | ["_"]) ()* "}" >
}
/* -----------------------------------------------------------------------
*
* REFERENCE Lexical States
*
* This is more than a single state, because of the structure of
* the VTL references. We use three states because the set of tokens
* for each state can be different.
*
* $foo.bar( "arg" )
* ^ ^ ^
* | | |
* ----------- > REFERENCE : state initiated by the '$' character. Continues
* | | until end of the reference, or the . character.
* |------ > REFMODIFIER : state switched to when the '.' is encountered.
* | note that this is a switch, not a push. See notes at bottom
* | re stateStack.
* |-- > REFMOD2 : state switched to when the LPAREN is encountered.
* again, this is a switch, not a push.
*
* During the REFERENCE or REFMODIFIER lex states we will switch to
* REFINDEX if a bracket is encountered '['. for example: $foo[1]
* or $foo.bar[1], $foo.bar( "arg" )[1]
* ---------------------------------------------------------------------------- */
TOKEN :
{
<#ALPHA_CHAR: ["a"-"z", "A"-"Z"] >
| <#ALPHANUM_CHAR: [ "a"-"z", "A"-"Z", "0"-"9" ] >
| <#IDENTIFIER_CHAR: [ "a"-"z", "A"-"Z", "0"-"9", "-", "_" ] >
| | ["_"]) ()* >
| >
{
/*
* push the alpha char back into the stream so the following identifier
* is complete
*/
input_stream.backup(1);
/*
* and munge the token image so we just get a '.' when we have normal text that
* looks like a ref.ident
*/
matchedToken.image = ".";
if ( debugPrint )
System.out.print("DOT : switching to " + REFMODIFIER);
SwitchTo(REFMODIFIER);
}
}
TOKEN :
{
|
{
stateStackPop();
}
}
SPECIAL_TOKEN :
{
{
/*
* push every terminator character back into the stream
*/
input_stream.backup(1);
inReference = false;
if ( debugPrint )
System.out.print("REF_TERM :");
stateStackPop();
}
}
SPECIAL_TOKEN :
{
{
if ( debugPrint )
System.out.print("DIRECTIVE_TERM :");
input_stream.backup(1);
inDirective = false;
stateStackPop();
}
}
/**
* This method is what starts the whole parsing
* process. After the parsing is complete and
* the template has been turned into an AST,
* this method returns the root of the AST, which
* can subsequently be traversed by a visitor
* that implements the ParserVisitor interface
* (generated automatically by JavaCC).
*/
SimpleNode process() : {}
{
( Statement() )*
{ return jjtThis; }
}
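/**
 * A minimal consumption sketch (illustration only), assuming the JJTree-generated
 * ParserVisitor interface and a caller-supplied implementation `myVisitor`:
 *
 *   SimpleNode root = parser.parse(reader, templateName);
 *   Object result = root.jjtAccept(myVisitor, null); // dispatches to visit() per node type
 */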
/**
* These are the types of statements that
* are acceptable in Velocity templates.
*/
void Statement() #void : {}
{
IfStatement()
| LOOKAHEAD(2) Reference()
| Comment()
| Textblock()
| SetDirective()
| EscapedDirective()
| Escape()
| Directive()
| Text()
}
/**
* used to separate the notion of a valid directive that has been
* escaped, versus something that looks like a directive and
* is just schmoo. This is important to do as a separate production
* that creates a node, because we want this, in either case, to stop
* the further parsing of the Directive() tree.
*/
void EscapedDirective() : {}
{
{
Token t = null;
}
t =
{
/*
* churn and burn..
*/
t.image = escapedDirective( t.image );
}
}
/**
* Used to catch and process escape sequences in grammatical constructs
* as escapes outside of VTL are just characters. Right now we have both
* this and the EscapedDirective() construction because in the EscapedDirective()
* case, we want to suck in the # and here we don't. We just want
* the escapes to render correctly
*/
void Escape() : {}
{
{
Token t = null;
int count = 0;
boolean control = false;
}
( LOOKAHEAD(2) t =
{
count++;
}
)+
{
/*
* first, check to see if we have a control directive
*/
switch(t.next.kind ) {
case IF_DIRECTIVE :
case ELSE_DIRECTIVE :
case ELSEIF_DIRECTIVE :
case END :
control = true;
break;
}
/*
* if that failed, let's look ahead to see if we matched a PD or a VM
*/
String nTag = t.next.image.substring(1);
if (strictEscape
|| isDirective(nTag)
|| macroNames.containsKey(nTag)
|| rsvc.isVelocimacro(nTag, currentTemplateName))
{
control = true;
}
jjtThis.val = "";
for( int i = 0; i < count; i++)
jjtThis.val += ( control ? "\\" : "\\\\");
}
}
void Comment() : {}
{
( ) ?
|
|
}
void Textblock() : {}
{
}
void FloatingPointLiteral() : {}
{
}
void IntegerLiteral() : {}
{
}
void StringLiteral() : {}
{
}
/**
* This method corresponds to variable
* references in Velocity templates.
* The following are examples of variable
* references that may be found in a
* template:
*
* $foo
* $bar
*
*/
void Identifier() : {}
{
}
void Word() : {}
{
}
/**
* Supports the arguments for the Pluggable Directives
*/
int DirectiveArg() #void : {}
{
Reference()
{
return ParserTreeConstants.JJTREFERENCE;
}
| Word()
{
return ParserTreeConstants.JJTWORD;
}
| StringLiteral()
{
return ParserTreeConstants.JJTSTRINGLITERAL;
}
| IntegerLiteral()
{
return ParserTreeConstants.JJTINTEGERLITERAL;
}
/*
* Need to put this before the floating point expansion
*/
| LOOKAHEAD( [] ( Reference() | IntegerLiteral()) [] ) IntegerRange()
{
return ParserTreeConstants.JJTINTEGERRANGE;
}
| FloatingPointLiteral()
{
return ParserTreeConstants.JJTFLOATINGPOINTLITERAL;
}
| Map()
{
return ParserTreeConstants.JJTMAP;
}
| ObjectArray()
{
return ParserTreeConstants.JJTOBJECTARRAY;
}
| True()
{
return ParserTreeConstants.JJTTRUE;
}
| False()
{
return ParserTreeConstants.JJTFALSE;
}
}
/**
* Supports the Pluggable Directives
* #foo( arg+ )
*/
SimpleNode Directive() :
{
Token t = null;
int argType;
int argPos = 0;
Directive d;
int directiveType;
boolean isVM = false;
boolean doItNow = false;
}
{
/*
* note that if we were escaped, that is now handled by
* EscapedDirective()
*/
((t = ) | (t = ))
{
String directiveName;
if (t.kind == ParserConstants.BRACKETED_WORD)
{
directiveName = t.image.substring(2, t.image.length() - 1);
}
else
{
directiveName = t.image.substring(1);
}
d = getDirective(directiveName);
/*
* Velocimacro support : if the directive is the #macro directive
* then set the flag so that after the block parsing, we add the VM
* right then. (So it is available if used w/in the current template )
*/
if (directiveName.equals("macro"))
{
doItNow = true;
}
/*
* set the directive name from here. No reason for the thing to know
* about parser tokens
*/
jjtThis.setDirectiveName(directiveName);
if ( d == null)
{
if( directiveName.startsWith("@") )
{
// block macro call of type: #@foobar($arg1 $arg2) astBody #end
directiveType = Directive.BLOCK;
}
else
{
/*
* if null, then not a real directive, but maybe a Velocimacro
*/
isVM = rsvc.isVelocimacro(directiveName, currentTemplateName);
/*
* Currently, all VMs are LINE directives
*/
directiveType = Directive.LINE;
}
}
else
{
directiveType = d.getType();
}
/*
* now, switch us out of PRE_DIRECTIVE
*/
token_source.SwitchTo(DIRECTIVE);
argPos = 0;
}
/**
* Look for the pattern [WHITESPACE] '('
*/
(LOOKAHEAD( { isLeftParenthesis() } )
/*
* if this is indeed a left parenthesis, match the #foo ( arg ) pattern
*/
(([] ) ( LOOKAHEAD(2) [] [ []]
argType = DirectiveArg()
{
if (argType == ParserTreeConstants.JJTWORD)
{
if (doItNow && argPos == 0)
{
/* if #macro and it's the 0th arg, ok */
}
else if (isVM)
{
throw new MacroParseException("Invalid arg #"
+ argPos + " in VM " + t.image, currentTemplateName, t);
}
/* if #foreach and it's the 2nd arg, ok */
else if (d != null && (!directiveName.equals("foreach") || argPos != 1))
{
throw new MacroParseException("Invalid arg #"
+ argPos + " in directive " + t.image, currentTemplateName, t);
}
else
{
/* either schmoo or a late-defined macro,
* VelocimacroProxy will have to check for latter. */
}
}
else
{
if (doItNow && argPos == 0)
{
/* if a VM and it's the 0th arg, not ok */
throw new MacroParseException("Invalid first arg"
+ " in #macro() directive - must be a"
+ " word token (no \' or \" surrounding)", currentTemplateName, t);
}
}
argPos++;
}
)* [] )
{
if (directiveType == Directive.LINE)
{
return jjtThis;
}
}
|
{
if (doItNow) // doItNow is true if the directive is "macro"
{
// VELOCITY-667 We get here if we have a "#macro" construct
// without parenthesis which is a parse error
throw new MacroParseException("A macro declaration requires at least a name argument"
, currentTemplateName, t);
}
/**
* Not a directive
*/
token_source.stateStackPop();
token_source.inDirective = false;
return jjtThis;
}
)
/*
* and the following block if the PD needs it
*/
( Statement() )* #Block
{
/*
* VM : if we are processing a #macro directive, we need to
* process the block. In truth, I can just register the name
* and do the work later when init-ing. That would work
* as long as things were always defined before use. This way
* we don't have to worry about forward references and such...
*/
if (doItNow)
{
// Further checking of macro arguments
Macro.checkArgs(rsvc, t, jjtThis, currentTemplateName);
// Add the macro name so that we can perform escape processing
// on defined macros
String macroName = jjtThis.jjtGetChild(0).getFirstToken().image;
macroNames.put(macroName, macroName);
}
/*
* VM : end
*/
return jjtThis;
}
}
/**
* for creating a map in a #set
*
* #set($foo = {$foo : $bar, $blargh : $thingy})
*/
void Map() : {}
{
(
LOOKAHEAD(2) Parameter() Parameter() ( Parameter() Parameter() )*
|
[ ]
)
/** note: need both tokens as they are generated in different states **/
( | )
}
void ObjectArray() : {}
{
[ Parameter() ( Parameter() )* ]
}
/**
* supports the [n..m] vector generator for use in
* the #foreach() to generate measured ranges w/o
* needing explicit support from the app/servlet
*/
void IntegerRange() : {}
{
[]
( Reference() | IntegerLiteral())
[] []
(Reference() | IntegerLiteral())
[]
}
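/**
 * Illustration only: typical uses of the range generator in a template.
 * The variable names are hypothetical.
 *
 *   #foreach( $i in [1..5] )$i #end           ## iterates over 1, 2, 3, 4, 5
 *   #foreach( $i in [$start..$end] )...#end   ## references are allowed as bounds
 */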
/**
* A Simplified parameter more suitable for an index position: $foo[$index]
*/
void IndexParameter() #void: {}
{
[]
(
StringLiteral()
| IntegerLiteral()
| True()
| False()
| Reference()
)
[ ]
}
/**
* This method has yet to be fully implemented
* but will allow arbitrarily nested method
* calls
*/
void Parameter() #void: {}
{
[]
(
StringLiteral()
| IntegerLiteral()
| LOOKAHEAD( [] ( Reference() | IntegerLiteral()) [] ) IntegerRange()
| Map()
| ObjectArray()
| True()
| False()
| Reference()
| FloatingPointLiteral()
)
[ ]
}
/**
* This method has yet to be fully implemented
* but will allow arbitrarily nested method
* calls
*/
void Method() : {}
{
Identifier() [ Parameter() ( Parameter() )* ]
}
void Index() : {}
{
IndexParameter()
}
void Reference() : {}
{
/*
* A reference is either the formal ${...} form or the shorthand $... form
*/
(
(Index())*
(LOOKAHEAD(2) (LOOKAHEAD(3) Method() | Identifier() ) (Index())* )*
)
|
(
(Index())*
(LOOKAHEAD(2) (LOOKAHEAD(3) Method() | Identifier() ) (Index())* )*
)
}
void True() : {}
{
}
void False() : {}
{
}
/**
* This is somewhat of a kludge; the problem is that the parser picks
* up on '$[' or '$![' as being a Reference, and does not dismiss it even though
* there is no identifier between the $ and the [. This has something to do
* with the LOOKAHEAD in Reference, but I never found a way to resolve
* it in a more fashionable way..
*/
TOKEN :
{
}
/**
* This method is responsible for allowing
* all non-grammar text to pass through
* unscathed.
*/
void Text() : {}
{
|
|
|
|
|
|
|
|
|
|
}
/* -----------------------------------------------------------------------
*
* Defined Directive Syntax
*
* ----------------------------------------------------------------------*/
void IfStatement() : {}
{
[] Expression()
( Statement() )* #Block
[ LOOKAHEAD(1) ( ElseIfStatement() )+ ]
[ LOOKAHEAD(1) ElseStatement() ]
}
void ElseStatement() : {}
{
( Statement() )* #Block
}
void ElseIfStatement() : {}
{
[]
Expression()
( Statement() )* #Block
}
/**
* Currently supports both types of set :
* #set( expr )
* #set expr
*/
void SetDirective() : {}
{
([] Reference() [] Expression()
{
/*
* ensure that inSet is false. Otherwise this leads to some amusing bugs...
*/
token_source.inSet = false;
}
[] )
}
/* -----------------------------------------------------------------------
*
* Expression Syntax
*
* ----------------------------------------------------------------------*/
void Expression() : {}
{
// LOOKAHEAD( PrimaryExpression() ) Assignment()
//|
ConditionalOrExpression()
}
void Assignment() #Assignment(2) : {}
{
PrimaryExpression() Expression()
}
void ConditionalOrExpression() #void : {}
{
ConditionalAndExpression()
( ConditionalAndExpression() #OrNode(2) )*
}
void ConditionalAndExpression() #void : {}
{
EqualityExpression()
( EqualityExpression() #AndNode(2) )*
}
void EqualityExpression() #void : {}
{
RelationalExpression()
(
RelationalExpression() #EQNode(2)
| RelationalExpression() #NENode(2)
)*
}
void RelationalExpression() #void : {}
{
AdditiveExpression()
(
AdditiveExpression() #LTNode(2)
| AdditiveExpression() #GTNode(2)
| AdditiveExpression() #LENode(2)
| AdditiveExpression() #GENode(2)
)*
}
void AdditiveExpression() #void : {}
{
MultiplicativeExpression()
(
MultiplicativeExpression() #AddNode(2)
| MultiplicativeExpression() #SubtractNode(2)
)*
}
void MultiplicativeExpression() #void : {}
{
UnaryExpression()
(
UnaryExpression() #MulNode(2)
| UnaryExpression() #DivNode(2)
| UnaryExpression() #ModNode(2)
)*
}
void UnaryExpression() #void : {}
{
LOOKAHEAD(2) [] UnaryExpression() #NotNode(1)
| PrimaryExpression()
}
void PrimaryExpression() #void : {}
{
[]
(
StringLiteral()
| Reference()
| IntegerLiteral()
| LOOKAHEAD( [] ( Reference() | IntegerLiteral()) [] ) IntegerRange()
| FloatingPointLiteral()
| Map()
| ObjectArray()
| True()
| False()
| Expression()
)
[]
}
/* ======================================================================
Notes
-----
template == the input stream for this parser, contains 'VTL'
mixed in with 'schmoo'
VTL == Velocity Template Language : the references, directives, etc
schmoo == the non-VTL component of a template
reference == VTL entity that represents data within the context. ex. $foo
directive == VTL entity that denotes 'action' (#set, #foreach, #if )
defined directive (DD) == VTL directive entity that is expressed
explicitly w/in this grammar
pluggable directive (PD) == VTL directive entity that is defined outside of the
grammar. PD's allow VTL to be easily expandable w/o parser modification.
The problem with parsing VTL is that an input stream consists generally of
little bits of VTL mixed in with 'other stuff', referred to as 'schmoo'.
Unlike other languages, like C or Java, where the parser can punt whenever
it encounters input that doesn't conform to the grammar, the VTL parser can't do
that. It must simply output the schmoo and keep going.
There are a few things that we do here :
- define a set of parser states (DEFAULT, DIRECTIVE, REFERENCE, etc)
- define for each parser state a set of tokens
- define the VTL grammar, expressed (mostly) in the productions such as Text(),
SetDirective(), etc.
It is clear that this expression of the VTL grammar (the contents
of this .jjt file) is maturing and evolving as we learn more about
how to parse VTL ( and as I learn about parsing...), so in the event
this documentation is in disagreement w/ the source, the source
takes precedence. :)
Parser States
-------------
DEFAULT : This is the base or starting state, and strangely enough, the
default state.
PRE_DIRECTIVE : State immediately following '#' before we figure out which
defined or pluggable directive (or neither) we are working with.
DIRECTIVE : This state is triggered by a match of a DD or a PD.
REFERENCE : Triggered by '$'. Analogous to PRE_DIRECTIVE.
REFMODIFIER : Triggered by . when in REFERENCE.
REFMOD2 : Triggered by ( when in REFMODIFIER
(cont)
Escape Sequences
----------------
The escape processing in VTL is very simple. The '\' character acts
only as an escape when :
1) One or more '\' characters touch a VTL element.
A VTL element is either :
1) a reference that is in the context, or
2) a defined directive (#set, #if, #end, etc) or a valid
pluggable directive, such as #foreach
In all other cases the '\' is just another piece of text. The purpose of this
is to allow the non-VTL parts of a template (the 'schmoo') to not have to be
altered for processing by Velocity.
So if in the context $foo and $bar were defined and $woogie was not
\$foo \$bar \$woogie
would output
$foo $bar \$woogie
Further, you can stack them and they take effect left to right, just like conventional
escape characters in other languages.
\$foo = $foo
\\$foo = \<foo>   (a '\' followed by the value of $foo)
\\\$foo = \$foo
What You Expect
---------------
The recent versions of the parser are trying to support precise output to
support general template use. The directives do not render trailing
whitespace and newlines if followed by a newline. They will render
preceding whitespace. The only exception is #set, which also eats
preceding whitespace.
So, with a template :
------
#set $foo="foo"
#if($foo)
\$foo = $foo
#end
------
it will render precisely :
------
$foo = foo
------
*/