% Copyright 2012 VAStech SA (PTY) LTD
% 
%    Licensed under the Apache License, Version 2.0 (the "License");
%    you may not use this file except in compliance with the License.
%    You may obtain a copy of the License at
% 
%        http://www.apache.org/licenses/LICENSE-2.0
% 
%    Unless required by applicable law or agreed to in writing, software
%    distributed under the License is distributed on an "AS IS" BASIS,
%    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%    See the License for the specific language governing permissions and
%    limitations under the License.


\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{xcolor} % required by \textcolor in the \TBD macros below
\usepackage[margin=2cm]{geometry}
\usepackage[parfill]{parskip}
\usepackage[pdftex]{graphicx}
\DeclareGraphicsExtensions{.jpg,.pdf}
\usepackage{longtable}
\usepackage{fancyvrb}
\usepackage{listings}
\usepackage{pdflscape}
\usepackage{pdfpages}
\usepackage[bookmarksnumbered,bookmarksopen,bookmarksopenlevel=2,pdffitwindow]{hyperref}

\begin{document}
\begin{center}
	{\Huge Learn You a gocc for Great Good} \\
	{\large or \\
	How to save the world by using compiler theory \\
	\vspace{1cm}
	2014-05-28}\\
	\vspace{1cm}
	\includegraphics[scale=.1]{gocc}
\end{center}
\tableofcontents

\newcommand{\TBD}{\textcolor{red}{TBD}}
\newcommand{\TBDx}[1]{\textcolor{red}{TBD:} #1}
\newcommand{\Go}{{\em Go}\ }
\newcommand{\Code}[1]{{\bf #1} } 
\newcommand{\gocc}{{\em gocc}\ }
\newcommand{\Clang}{{\em C} language}

\section{Introduction}
	gocc is a compiler kit which generates lexers, parsers and stand-alone DFAs from an EBNF file. 

	gocc lexers are deterministic finite state automata (DFA), which recognise regular languages.  

	gocc parsers are pushdown automata (PDA), which recognise LR-1 languages. LR-1 is the set of languages, which can be parsed deterministically. Some context free grammars (CFG) are outside LR-1, because they produce ambiguous derivations. gocc recognises ambiguous grammars and can automatically resolve LR-1 conflicts (see section~\ref{sec:lr conflicts}).

	gocc can also be used to generate stand-alone finite state automata for parsing simple regular languages. See for example the mail address example (section~\ref{sec:example mail}).

	gocc supports parser error recovery (see section~\ref{sec:error recovery}). 

	gocc supports action expressions, embedded in the input grammar, for the specification of semantic actions. For simple applications action expressions can be used to implement a syntax directed translation directly within the grammar. See the example in section~\ref{sec:first example}. For more complex applications the action routines can be used to build an abstract syntax tree (AST), which is further processed by later stages of the application. See the example in section~\ref{sec:example ast}.

	gocc has been successfully used to develop a query language compiler; a configuration / control language for a distributed system; parsers for protocol messages specified in ABNF~\cite{ABNF} and gographviz (\url{http://code.google.com/p/gographviz/}). In addition gocc1 was used to generate the parser for gocc2, and gocc2 to generate the lexer and parser for gocc3.

	gocc was designed to be easy to use and experience has shown that its users require very little background knowledge of language and compiler theory to apply it to simple language applications, such as syntax directed translation. An appreciation of mathematical formalism is usually enough and this guide is intended to provide sufficient information for such users, provided they understand:
	\begin{itemize}
		\item The go language;
		\item How to use context free grammars;
		\item How to separate lexical, syntactic and semantic analysis.
	\end{itemize}

	More complex applications, such as compiled languages and advanced protocol message parsing require more background, especially:
	\begin{itemize}
		\item The relationship between languages, grammars and automata;
		\item The relationship between regular and context free grammars;
		\item The equivalence of finite state automata with regular grammars; and of pushdown automata with context free grammars;
		\item The meaning and limits of top down/predictive parsing, bottom up parsing and deterministic parsing;
		\item The implications of language ambiguity as well as shift/reduce and reduce/reduce conflicts;
		\item The implications of grammars that generate languages outside the class of context free languages;
		\item Compiler design.
	\end{itemize}

	The author considers the {\em Dragon Book}~\cite{Dragon Book} still the best reference for these topics. The reader is also directed to \cite{Modern Compiler Design} for a modern treatment of compiler design, as well as \cite{Parsing} for a comprehensive treatment of the parsing techniques used in gocc.

	gocc was conceived out of need in the year after Google released the \Go language. At the time there was no other parser generator available, which could generate parsers in the \Go language. The author set out to create a parser generator for the set of all deterministically parseable languages, which implied the LR(1) technique. Although there are now alternatives to gocc available to \Go programmers we offer gocc to the community in the hope that someone may find it useful and as a token of thanks to Google for the gift of \Go.

\section{Copyright}
	\begin{verbatim}
		Copyright 2012 VAStech SA (PTY) LTD
		
		    Licensed under the Apache License, Version 2.0 (the "License");
		    you may not use this file except in compliance with the License.
		    You may obtain a copy of the License at
		    
		        http://www.apache.org/licenses/LICENSE-2.0
		    
		    Unless required by applicable law or agreed to in writing, software
		    distributed under the License is distributed on an "AS IS" BASIS,
		    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
		    See the License for the specific language governing permissions and
		    limitations under the License.
	\end{verbatim}



\section{Definition of terms}
	\begin{longtable}{lp{15cm}}
		\bf AST & Abstract syntax tree\\
		\bf CFG & Context Free Grammar \\
		\bf DFA & Deterministic Finite State Automaton. gocc lexers are DFAs. \\
		\bf PDA & Pushdown Automaton. gocc parsers are PDAs, and can recognise all deterministically parseable (LR-1) languages.\\
	\end{longtable}

\section{Getting started}
	\begin{enumerate}
		\item Download and install \Go from \url{http://golang.org}.

		\item Set your \Code{GOPATH} environment variable. See \url{http://golang.org/doc/code.html}.

		\item Install \gocc:
			\begin{enumerate}
				\item In your command line run: \Code{go get code.google.com/p/gocc/} (go get will git clone gocc into GOPATH/src/code.google.com/p/gocc and run go install)

					or 

				\item Alternatively clone the repository: \url{https://code.google.com/p/gocc/source/checkout}. Followed by:
				\verb|go install code.google.com/p/gocc|.
			\end{enumerate}

	\end{enumerate}

	Test your installation by running \verb|make test| from \verb|$GOPATH/src/code.google.com/p/gocc|.

\section{How to create and use a parser with gocc}
	Figure~\ref{fig:hl design} shows the high-level design of a user application, which uses a parser generated with gocc.
	\begin{itemize}
		\item The user creates a target grammar conforming to the gocc BNF standard (see section~\ref{sec:gocc syntax}).

		\item gocc reads the target grammar and generates the components shown in heavy outline in fig~\ref{fig:hl design}, i.e.: the lexer, parser, token and error packages. 

		\item The user also creates a package called by the compiler to execute semantic actions for each recognised production of the target grammar. The methods of the semantic package provided by the user correspond to the method calls specified in the action expressions of the target grammar.

		\item The user application initialises a lexer object with the input text. Then it calls the \Code{Parse(...)} method of the parser.

		\item Once created, the lexer and parser objects may be used repeatedly for successive inputs. For each input the lexer must be initialised with the next input text and the parser's \Code{Parse(...)} method called with a reference to the lexer.	

		\item The parser reads a stream of tokens (lexical elements) from the lexer by repeatedly calling the lexer interface method, \Code{lexer.Scan()}. 

		\begin{verbatim}
			type Scanner interface {
			    Scan() (tok *token.Token)
			}
		\end{verbatim}

		Each call to \Code{lexer.Scan} returns a pointer to token.Token.

		\item The lexer reads a stream of input characters and recognizes the tokens specified in the target grammar. After reaching the end of input it returns the end of input token to every call to \Code{lexer.Scan()}.

		\item Whenever the parser recognises the complete body of a production of the target grammar, it calls the function specified in the action expression associated with that production alternative. The parsed symbols of the recognised production are passed as parameters to the action expression (see section~\ref{sec:gocc syntax}). The result of the action expression is placed on the parser's stack as an attribute of the recognised language symbol.

		\item When the parser recognises the complete start production of the grammar it calls its associated action expression. The result of the action expression is returned to the user application as type \Code{interface\{\}} together with a \Code{nil} error value.

		\item If the parser encounters an error in the input it may perform automatic error recovery (see section~\ref{sec:error recovery}). If the error is recoverable the parser places all the parsed language symbols associated with the error (completed productions as well as tokens) in a symbol of type \Code{*errors.Error} and places this symbol on the parser stack. The parser then discards input tokens until it encounters an input token which may validly follow the recovered production and parsing continues normally. When error recovery is specified the user application must handle the error symbols which it may receive as attributes in calls to action expressions, or which may be returned as a top-level result of the parse to the calling application.

		\item If the parser encounters an irrecoverable error it returns a non-\Code{nil} error value together with an {\em indeterminate} parse result.
	\end{itemize}

	\begin{figure}
		\includegraphics[scale=1]{"hl_design"}
		\caption{High-level design}
		\label{fig:hl design}
	\end{figure}

\section{First example}\label{sec:first example}
	This example shows how action expressions in the BNF can be used to implement a syntax directed translation scheme without the need for further user provided packages, such as AST.

	The source code of the following example can be found at 

	\verb|$GOPATH/src/code.google.com/p/gocc/example/calc| 

	The grammar implements the simple desktop calculator described in~\cite{Dragon Book}. The generated code is both a parser and an interpreter for the calculator.

	The following files are provided by the user:
	\begin{verbatim}
		> ls -R
		calc.bnf	calc_test.go	
	\end{verbatim}

	\begin{description}
		\item[calc.bnf] contains the grammar for this example.

		\item[calc\_test.go] will be used to execute the generated code. It represents the user application.

	\end{description}

	\subsection{Step 1: generate code}
		To generate code we run gocc from the directory containing \verb|calc.bnf| with the following command:

		\begin{verbatim}
			> gocc calc.bnf
		\end{verbatim}

		After running gocc we see that the directory structure now contains the following files:

		\begin{verbatim}
			> ls -R
			LR1_sets.txt	calc_test.go	first.txt	lexer_sets.txt	token
			calc.bnf	errors		lexer		parser		util

			./errors:
			errors.go

			./lexer:
			acttab.go		lexer.go		transitiontable.go

			./parser:
			action.go		actiontable.go		gototable.go		parser.go		productionstable.go

			./token:
			token.go

			./util:
			litconv.go	rune.go
		\end{verbatim}

		The generated files are:
		\begin{description}
			\item[LR1\_sets.txt and first.txt] Files containing information about the parser table generation process. They are useful for debugging the parser.

			\item[lexer\_sets.txt] File containing information about the lexer generation process. It is useful for debugging the lexer.

			\item[errors/errors.go] Declares \verb|type Error|, which is used during automatic recovery from errors in the input. See section~\ref{sec:error recovery} for more details.

			\item[lexer/] Contains the files generated for the lexer.

			\item[parser/] Contains the files generated for the parser. The interpreter code is embedded within \verb|parser/productionstable.go|.

			\item[token/token.go] Contains the declaration of the tokens of the grammar.

			\item[util/litconv.go] Contains functions to convert a token literal to a value, e.g.: int64 or rune.

			\item[util/rune.go] Contains func() RuneToString, which is used by debug code in the lexer.
		\end{description}

	\subsection{The example grammar}
		\verb|calc.bnf| displays the main features of a gocc BNF file:

		\begin{verbatim}
			/* Lexical part */

			_digit : '0'-'9' ;

			int64 : '1'-'9' {_digit} ;

			!whitespace : ' ' | '\t' | '\n' | '\r' ;

			/* Syntax part */

			<< 
			import(
			    "code.google.com/p/gocc/example/calc/token"
			    "code.google.com/p/gocc/example/calc/util"
			)
			>>

			Calc : Expr;

			Expr
			    : Expr "+" Term           << $0.(int64) + $2.(int64), nil >>
			    | Term			
			    ;

			Term
			    : Term "*" Factor         << $0.(int64) * $2.(int64), nil >>
			    |	Factor			
			    ;

			Factor 
			    : "(" Expr ")"            << $1, nil >>
			    | int64                   << util.IntValue($0.(*token.Token).Lit) >>
			    ;

		\end{verbatim}

		The BNF has two parts: a lexical and a syntax part. Both parts are optional -- gocc can be used to generate only a lexer or only a parser -- but every BNF must have at least a lexical part or a syntax part. This example has both and gocc will generate a lexer, a parser and all the support they require to function.

		It declares the imported package \verb|code.google.com/p/gocc/example/calc/token| (generated by gocc), which will be used in the action expressions of some productions of the grammar.

		The lexical part consists of:

		\begin{enumerate}
			\item A token definition, \verb|int64|, which is used in the syntax part. Token identifiers always start with a lower case letter in the range: \verb|'a'-'z'|. The token \verb|int64| must start with a digit in the range, \verb|'1'-'9'|, followed by zero or more digits in the range, \verb|'0'-'9'|. It uses the regular definition, \verb|_digit|, to declare the range, \verb|'0'-'9'|. The curly braces around \verb|_digit| indicate that it may be repeated zero or more times.

			A lexical pattern may also be enclosed in square brackets, e.g.: \verb|[ '0'-'9' ]|, to indicate zero or one instances of it (in this case a decimal digit) may be present. See section~\ref{sec:gocc syntax} for details.

			\item \verb|!whitespace| declares an ignored token. The \verb|'!'| before \verb|whitespace| indicates that the token is to be ignored. The generated lexer suppresses ignored tokens so that the parser never sees them. They can be used for white space and comments.

			\item A regular definition, \verb|_digit|. Regular definition identifiers always start with \verb|'_'|. Regular definitions act like macro definitions in the BNF and allow tokens to be constructed from pre-defined components. Regular definitions and token definitions may not be mutually or self-recursive. That would lead to a context free grammar, which cannot be recognised by a finite state automaton.
		\end{enumerate}

		gocc supports C-style line and block comments. The calc BNF contains two block comments.

		The syntax part consists of:

		\begin{enumerate}
			\item An optional file header section, which is textually included in the generated parser file, \verb|parser/productionstable.go|. The file header allows the user to specify imports, declarations and functions, which are required by the action routines in the syntax part. In the calc example the file header contains two import declarations.

			\item The syntax start symbol, \verb|Calc|, is the highest level syntax production of the BNF. The parser will try to match the input source with a valid derivation of the start symbol. All syntax production identifiers start with an upper case letter in the range, \verb|'A'-'Z'|.

			\item Each alternative of a production may optionally have an action expression. E.g.:

			\begin{verbatim}
				Factor : int64                 << util.IntValue($0.(*token.Token).Lit) >>
			\end{verbatim}

			has an action expression, \verb|<< util.IntValue($0.(*token.Token).Lit) >>|, which contains a call to the function, IntValue, in the generated file util/litconv.go. Attribute \verb|$0|, associated with the symbol \verb|int64| in the body of the production, is type asserted to \verb|*token.Token|, of which the field \verb|Lit| is passed as parameter to the function.
			Action expressions must always return a value of type, \verb|(interface{}, error)|. If the returned error parameter is \verb|nil|, the \verb|interface{}| parameter is placed on the parser stack as the attribute of the recognised production.

			\item Each recognised term in a production has an associated attribute. For example: the production alternative:

			\begin{verbatim}
				Term : Term "*" Factor         << $0.(int64) * $2.(int64), nil >>
			\end{verbatim}

			has three symbols in its body. Each has an associated attribute on the stack: \verb|Term|, type \verb|int64|; \verb|"*"|, type \verb|*token.Token|; and \verb|Factor|, type \verb|int64|. They may be referred to in the action expression as \$0, \$1 and \$2, respectively.

			The action expression in this example means the following: {\em return the product of \$0 and \$2, both cast to type \verb|int64|, together with \verb|nil|.}
		\end{enumerate}

		When the parser has recognised the whole body of a production alternative, it calls the associated action expression with the attributes of the recognised language symbols of that body. If the action expression returns a non-nil error the parser stops and returns the error to the calling user application. If the action expression returns a nil error the parser replaces the recognised language symbols of the production on its stack with the attribute returned by the action expression.

		If a recognised production alternative does not have a specific action expression, e.g.: 
		\begin{verbatim}
			Calc : Expr;
		\end{verbatim}

		the parser always pushes \$0 on the stack as the attribute of the production.

		The first alternative of \verb|Expr| returns the sum of the attributes of \verb|Expr| and \verb|Term| after casting them to \verb|int64|. The second alternative returns the attribute of \verb|Term|.

		The first alternative of \verb|Term| returns the product of \verb|Term| and \verb|Factor| after casting them to \verb|int64|. The second term returns the attribute of \verb|Factor|.

		The first alternative of \verb|Factor| simply returns the attribute of the parenthesised \verb|Expr|. The second alternative returns the value of a numeric token.

		In the second alternative of \verb|Factor| we use a method on the input token, which returns 
		\verb|(int64, error)|. Therefore the types of all numbers are \verb|int64|.

	\subsection{The test program}
		The root folder of the \Code{Calc} example contains \Code{calc\_test.go}, which has the following test program. In addition to testing the code it shows how to initialise and use the generated lexer and parser/interpreter.

		\begin{verbatim}
			package calc

			import (
			    "code.google.com/p/gocc/example/calc/lexer"
			    "code.google.com/p/gocc/example/calc/parser"
			    "fmt"
			    "testing"
			)

			type TI struct {
			    src    string
			    expect int64
			}

			var testData = []*TI{
			    {"1 + 1", 2},
			    {"1 * 1", 1},
			    {"1 + 2 * 3", 7},
			}

			func Test1(t *testing.T) {
			    p := parser.NewParser()
			    pass := true
			    for _, ts := range testData {
			        s := lexer.NewLexer([]byte(ts.src))
			        sum, err := p.Parse(s)
			        if err != nil {
			            pass = false
			            t.Log(err.Error())
			        }
			        if sum != ts.expect {
			            pass = false
			            t.Log(fmt.Sprintf("Error: %s = %d. Got %d\n", ts.src, sum, ts.expect))
			        }
			    }
			    if !pass {
			        t.Fail()
			    }
			}
		\end{verbatim}

	\subsection{Step 2: running \Code{go test}}
		From the root folder of the \Code{Calc} example, execute the following command:

		\begin{verbatim}
			> go test -v .
		\end{verbatim}

		which generates the following output:

		\begin{verbatim}
			> go test -v
			warning: building out-of-date packages:
			    code.google.com/p/gocc/example/calc/token
			    code.google.com/p/gocc/example/calc/lexer
			    code.google.com/p/gocc/example/calc/errors
			    code.google.com/p/gocc/example/calc/parser
			installing these packages with 'go test -i' will speed future tests.

			=== RUN Test1
			--- PASS: Test1 (0.00 seconds)
			PASS
			ok      code.google.com/p/gocc/example/calc	0.017s
		\end{verbatim}

		{\em Congratulations!} You have executed your first gocc-generated code.

\section{Commandline syntax}\label{sec:commandline}
	\begin{verbatim}
        usage: gocc flags bnf_file
        
          bnf_file: contains the BNF grammar
        
        Flags:
          -a=false: automatically resolve LR(1) conflicts
          -debug_lexer=false: enable debug logging in lexer
          -debug_parser=false: enable debug logging in parser
          -h=false: help
          -no_lexer=false: do not generate a lexer
          -o="/Users/marius/goprj/src/code.google.com/p/gocc": output dir.
          -p="code.google.com/p/gocc": package
          -u=false: allow unreachable productions
          -v=false: verbose
	\end{verbatim}

\section{Example: parsing simple mail addresses} \label{sec:example mail}
	This example shows how gocc can be used to generate a stand-alone FSA to parse a regular language. The goal is to parse simple mail address specifications like: \verb|mailbox@gmail.com| or \verb|"mail box"@gmail.com|. The source code of the sample can be found at

	\verb|$GOPATH/src/code.google.com/p/gocc/example/mail|

	\verb|mail.bnf| contains:

	\begin{verbatim}
		!whitespace : '\t' | '\n' | '\r' | ' ' ;

		_atext  : 'A'-'Z' | 'a'-'z' | '0'-'9'	
		        | '!' | '#' | '$' | '%' | '&' | '\'' | '*' | '+' | '-' | '/' 
		        | '=' | '?' | '^' | '_' | '`' | '{' | '|' | '}' | '~'
		        | '\u0100'-'\U0010FFFF'
		;

		_atom : _atext {_atext} ;

		_dotatom : _atom {'.' _atom} ;

		_quotedpair : '\\' . ;

		_quotedstring : '"' (_quotedpair | .) {_quotedpair | .} '"' ;

		addrspec : (_dotatom | _quotedstring)  '@' _dotatom ;
	\end{verbatim}

	The production 

	\verb|_quotedpair : '\\' . ;|

	uses \verb|'.'| to specify that any UTF-8 rune will be accepted after a \verb|'\'| character in the input.

	We generate code for the example by

	\verb|gocc mail.bnf|

	which produces the following generated code:

	\begin{verbatim}
		> ls -R
		lexer		lexer_sets.txt	mail.bnf	parser_test.go	token		util

		./lexer:
		acttab.go		lexer.go		transitiontable.go

		./token:
		token.go

		./util:
		rune.go
	\end{verbatim}

	Note that no parser has been generated, because the BNF does not include a syntax part. gocc generates only a DFA/lexer for the lexical part, which is present in the BNF. This application uses only a DFA to parse its input.

	\verb|parser_test.go| shows how the generated DFA could be repeatedly invoked to parse a stream of email addresses:

	\begin{verbatim}
		package mail

		import (
		    "code.google.com/p/gocc/example/mail/lexer"
		    "code.google.com/p/gocc/example/mail/token"
		    "testing"
		)

		var testData1 = map[string]bool{
		    "mymail@google.com":          true,
		    "@google.com":                false,
		    `"quoted string"@mymail.com`: true,
		    `"unclosed quote@mymail.com`: false,
		}
	\end{verbatim}

	Function \verb|Test1| proves that the DFA correctly recognises addresses according to the specification.

	\begin{verbatim}
		func Test1(t *testing.T) {
		    for input, ok := range testData1 {
		        l := lexer.NewLexer([]byte(input))
		        tok := l.Scan()
		        switch {
		        case tok.Type == token.INVALID:
		            if ok {
		                t.Errorf("%s", input)
		            }
		        case tok.Type == token.TokMap.Type("addrspec"):
		            if !ok {
		                t.Errorf("%s", input)
		            }
		        default:
		            t.Fatalf("This must not happen")
		        }
		    }
		}

	\end{verbatim}

	Function \verb|Test2| shows how to invoke the lexer repeatedly to parse a stream of addresses.

	\begin{verbatim}
		var checkData2 = []string{
		    "addr1@gmail.com",
		    "addr2@gmail.com",
		    "addr3@gmail.com",
		}

		var testData2 = `
		    addr1@gmail.com
		    addr2@gmail.com
		    addr3@gmail.com
		`

		func Test2(t *testing.T) {
		    l := lexer.NewLexer([]byte(testData2))
		    num := 0
		    for tok := l.Scan(); tok.Type == token.TokMap.Type("addrspec"); tok = l.Scan() {
		        if string(tok.Lit) != checkData2[num] {
		            t.Errorf("%s != %s", string(tok.Lit), checkData2[num])
		        }
		        num++
		    }
		    if num != len(checkData2) {
		        t.Fatalf("%d addresses parsed", num)
		    }
		}
	\end{verbatim}

\section{Handling LR(1) conflicts} \label{sec:lr conflicts}
	If a target grammar is outside the class of LR(1) grammars it cannot be parsed deterministically with one symbol lookahead. This condition manifests as LR(1) conflicts, of which there are two types:

	\begin{description}
		\item[Shift/Reduce conflict:] The parser has recognised a valid production body on the stack, and can reduce it to the corresponding production. 

		However, the same symbols are also a valid prefix of the body of another, longer production. The parser could continue to shift the input symbols and attempt to recognise the longer production.

		\gocc uses the {\em maximal-munch rule} (see~\cite{Modern Compiler Design}) to resolve this conflict by always choosing shift over reduce. The longest valid production will therefore always be recognised.

		\item[Reduce/Reduce conflict:] The parser has recognised a valid sequence of symbols, which can be reduced to more than one production.

		\gocc will always reduce the production that was declared first in the grammar.
	\end{description}

\section{Example: reduce/reduce conflict handling} \label{sec:example rr}
	The source code of the following example can be found at

	\verb|$GOPATH/src/code.google.com/p/gocc/example/reducereduce|

	\begin{verbatim}
	RR : A | B ;

	B : a ;

	A : a | A a ;

	\end{verbatim}

	When we run \gocc on \verb|$GOPATH/src/code.google.com/p/gocc/example/reducereduce/rr.bnf| we discover a reduce/reduce conflict:

	\begin{verbatim}
		> gocc -v rr.bnf
		LR(1) conflict: S4 Reduce:3(B) / Reduce:4(A)
		ABORTING: 1 LR(1) conflicts
	\end{verbatim}

	\gocc does not generate code because the default for automatic LR(1) conflict resolution is \verb|off|. From the output we see that \gocc could reduce either of production \verb|B| or \verb|A| in state \verb|4|.

	\gocc generates a number of informational files, and at this point we turn to 

	\verb|$GOPATH/src/code.google.com/p/gocc/example/reducereduce/sm_sets.txt|

	to analyse the conflict.

	\verb|sm_sets.txt| contains the LR(1) sets, which will be translated into the states of the parser. Each state contains a set of {\em LR(1) items}, which specifies what the parser expects in that state. 

	An LR(1) item is a production alternative with the position of the parser marked by a $\bullet$, and the next symbol expected after this production body, in double angle brackets. Alternatives of a production are in  separate items. For example: 

	$A : a\bullet <<\$>>$

	indicates that the compiler has recognised the production alternative, \verb|A : a| and  next expects to see the end of input character, \verb|$|.

	Getting back to our R/R conflict, \verb|S4| in \verb|sm_sets.txt| represents state 4 and contains the following items:

	\[
		\begin{array}{ll}
			S4 \{ \\
			    & A : a\bullet  <<\$>> \\
			    & B : a\bullet  <<\$>> \\
			    & A : a\bullet  <<a>> \\
			\} \\
		\end{array}
	\]

	We see that the bodies of all items in S4 are the same and that the parser has completely recognised them. Two items reduce to production \verb|A| and one to production \verb|B|. This is the reduce/reduce conflict: \verb|A| vs \verb|B|.

	When \gocc is run with the \verb|-a| option it will automatically resolve this conflict by reducing production \verb|B|, because it is declared in \verb|rr.bnf| before \verb|A|:

	\begin{verbatim}
		> gocc -a rr.bnf
		Resolved 0 shift/reduce, 1 reduce/reduce conflicts
	\end{verbatim}

\section{Example: Shift/reduce conflict handling} \label{sec:example sr}
	The source code of the following example can be found at

	\verb|$GOPATH/src/code.google.com/p/gocc/example/shiftreduce|

	It is the classic example of the dangling else in the \Clang:

	\begin{verbatim}
		Stmt :
		        if expr then Stmt
		    |   if expr then Stmt else Stmt
		;
	\end{verbatim}

	When we run \gocc on \verb|$GOPATH/src/code.google.com/p/gocc/example/shiftreduce/sr.bnf| we discover a shift/reduce conflict:

	\begin{verbatim}
		> gocc -v sr.bnf
		LR(1) conflict: S11 Shift:12 / Reduce:1(Stmt)
		ABORTING: 1 LR(1) conflicts
	\end{verbatim}

	The problem is in the last two items of state 11, where the next symbol is \verb|else| and the parser can both shift and reduce:

	\[
		\begin{array}{ll}
			S11 \{ \\
			  & Stmt : if\ expr\ then\ Stmt\bullet\ <<\$>> \\
			  & Stmt : if\ expr\ then\ Stmt\ \bullet else\ Stmt\ <<\$>> \\
			  & Stmt : if\ expr\ then\ Stmt \bullet\ <<else>> \\
			  & Stmt : if\ expr\ then\ Stmt\ \bullet else\ Stmt\ <<else>> \\
			\} \\
		\end{array}
	\]

	When automatic LR(1) conflict resolution is selected by the \verb|-a| option, \gocc resolves this conflict in the same way as specified in the \Clang\ specification: by shifting and parsing the longest valid production ({\em maximal-munch}). This means recognising the \verb|else|-statement as part of the second \verb|if|.

\section{Example: Using an AST} \label{sec:example ast}
	The following example illustrates the use of user-provided action expressions to produce a simple abstract syntax tree (AST) for a list of simple statements. 

	The code for the example can be found at

	\verb|$GOPATH/src/code.google.com/p/gocc/example/astx|

	The grammar is in \verb|ast.bnf|:

	\begin{verbatim}
		<< import "code.google.com/p/gocc/example/astx/ast" >>

		StmtList : 
		      Stmt             << ast.NewStmtList($0) >>
		    | StmtList Stmt    << ast.AppendStmt($0, $1) >>
		;

		Stmt : 
		      id               << ast.NewStmt($0) >>
		;
	\end{verbatim}

	At the top of the grammar is a file header section containing an import statement for the user-provided package, 
	\verb|code.google.com/p/gocc/example/astx/ast|.

	The production action expressions will use functions from the package, \verb|ast|.

	The start production, \verb"StmtList" returns a tuple: \verb|(ast.StmtList, error)|, as we can see from the code of functions 
	\verb|NewStmtList| and \verb|AppendStmt| in 

	\verb|$GOPATH/src/code.google.com/p/gocc/example/astx/ast.go|:

	\begin{verbatim}
		package ast

		import(
		    "code.google.com/p/gocc/example/astx/token"
		)

		type (
		    StmtList	[]Stmt
		    Stmt 	string
		)

		func NewStmtList(stmt interface{}) (StmtList, error) {
		    return StmtList{stmt.(Stmt)}, nil
		}

		func AppendStmt(stmtList, stmt interface{}) (StmtList, error) {
		    return append(stmtList.(StmtList), stmt.(Stmt)), nil
		}	

		func NewStmt(stmtList interface{}) (Stmt, error) {
		    return Stmt(stmtList.(*token.Token).Lit), nil
		}
	\end{verbatim}

	Note the following:

	\begin{itemize}
		\item The attributes of the language symbols in the production are passed to the action expressions as parameters, referred to as $\$0, \$1, ...$

		\item The type of the parameters passed to the action expressions is \verb|interface{}| and must be type asserted by the called function to the expected type.

		\item The parser will return the result of a successful parse, a \verb"StmtList", to the calling application as type \verb"interface{}". The calling application must type assert the returned value to the expected type.
	\end{itemize}

	If we run 

	\verb"go test -v ." 

	from the directory 

	\verb|$GOPATH/src/code.google.com/p/gocc/example/astx/| 

	we get the following output:

	\begin{verbatim}
		> go test -v .
		warning: building out-of-date packages:
		        code.google.com/p/gocc/example/astx/token
		        code.google.com/p/gocc/example/astx/ast
		        code.google.com/p/gocc/example/astx/errors
		        code.google.com/p/gocc/example/astx/parser
		        code.google.com/p/gocc/example/astx/lexer
		installing these packages with 'go test -i .' will speed future tests.

		=== RUN TestPass
		input: a b c d e f
		output: [a b c d e f]
		--- PASS: TestPass (0.00 seconds)
		=== RUN TestFail
		input: a b ; d e f
		--- FAIL: TestFail (0.00 seconds)
		ast_test.go:23: 	Error: illegal -1(-1) ; @ 1:5, expected one of: id $
		        FAIL
		exit status 1
		FAIL	code.google.com/p/gocc/example/astx	0.015s	\end{verbatim}

	The first test, \verb"TestPass", has a valid input string, \verb|"a b c d e f"|, which parses successfully and returns the expected StmtList, \verb|[a b c d e f]|.

	The input to the second test, \verb|TestFail|, contains an invalid token, \verb|;|. The parser returns an error, indicating that it encountered an invalid token when it expected a token of type \verb|id| or the end of input.

\section{Example: Parser error recovery} \label{sec:error recovery}
	Without error recovery a \gocc parser terminates when it reaches the first error in the input. Sometimes it is convenient to attempt to continue the parse and this can be achieved in \gocc by specifying in the grammar which productions can recover from errors in the input. 

	When the \gocc reserved word, \verb|error|, is the first symbol in a production alternative, it indicates that that production can recover from input errors.

	We modify the AST example to illustrate error recovery. See:

	\verb|$GOPATH/src/code.google.com/p/gocc/example/errorrecovery/er.bnf|:

	\begin{verbatim}
		<< import "code.google.com/p/gocc/example/errorrecovery/ast" >>

		StmtList : 
		      Stmt             << ast.NewStmtList($0) >>
		    | StmtList Stmt    << ast.AppendStmt($0, $1) >>
		;

		Stmt : 
		      id               << ast.NewStmt($0) >>
		    | error
		;
	\end{verbatim}

	The production, \verb|Stmt|, now has an alternative: \verb"| error"

	This indicates to \gocc that input errors can be handled in production \verb|Stmt|.

	From the directory, 

	\verb|$GOPATH/src/code.google.com/p/gocc/example/errorrecovery/|, 

	run \verb|go test| as follows:

	\begin{verbatim}
		> go test -v .
		warning: building out-of-date packages:
		        code.google.com/p/gocc/example/errorrecovery/token
		        code.google.com/p/gocc/example/errorrecovery/ast
		        code.google.com/p/gocc/example/errorrecovery/errors
		        code.google.com/p/gocc/example/errorrecovery/parser
		        code.google.com/p/gocc/example/errorrecovery/lexer
		installing these packages with 'go test -i .' will speed future tests.

		=== RUN TestFail
		input: a b ; d e f
		parser.firstRecoveryState: State 3
		parser.firstRecoveryState: State 1, canRecover, true
		output: [
		    a
		    error:
		        Err: nil
		        ErrorToken: ";"(-1)
		        ErrorPos: 1:5
		        ErrorSymbols: ["b"(1)]
		        ExpectedTokens: [error $ id]
		    d
		    e
		    f
		]
		--- PASS: TestFail (0.00 seconds)
		PASS		ok      code.google.com/p/gocc/example/errorrecovery	0.015s
	\end{verbatim}

	The test case can be found in \\
	\verb|$GOPATH/src/code.google.com/p/gocc/example/errorrecovery/er_test.go|. \\
	It calls the parser with input string, \verb|"a b ; d e f"|, which contains an invalid token, \verb|;|.

	From the \verb|go test| output we see that the parser successfully recovered from the input error and returned a \verb|StmtList| containing an error symbol between \verb|[a| and \verb|d, e, f]|. The \verb|id|, \verb|b|, was lost in the error recovery. \TBDx{Explain exactly why the 'b' was lost.} The errored token was \verb|;| (invalid token) when the parser expected one of \verb|error, $| (end of input) or \verb|id|.

	The parser returned an error value of \verb|nil|, because it successfully recovered from the error.

	{\bf Note:} \\
	\begin{enumerate}
		\item When error recovery is allowed the user's code must expect errors and handle them appropriately in the code called by the production action expressions, as well as in the code handling the results returned by the parser.

		\item The parser will still return a non-\verb|nil| error value if it encounters an irrecoverable error.
	\end{enumerate}

	See \verb|$GOPATH/src/code.google.com/p/gocc/example/errorrecovery/errors/error.go| for the definition of errors.Error.

\section{Example: Using another lexer} \label{sec:no lexer}
	The generation of lexer code can be suppressed with the \verb|no_lexer| option (see section~\ref{sec:commandline}). An example, 
	which uses a hand-written lexer, can be found at: 

	\verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/nolexer.bnf|:

	\begin{verbatim}
<<
import (
    "fmt"
    "code.google.com/p/gocc/example/nolexer/token"
)
>>

Hello : Saying name   << func () (Attrib, error) {
                            fmt.Println(string($1.(*token.Token).Lit)); 
                            return nil, nil} () >> 
      ;

Saying : "hello"    << func () (Attrib, error) {
                            fmt.Print("hello "); 
                            return nil, nil} () >>
       | "hiya"     << func () (Attrib, error) {
                            fmt.Print("hiya "); 
                            return nil, nil} () >>
       ;	
    \end{verbatim}

    This grammar contains no lexical productions. We generate code for it using 

    \verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/gen.sh|:

    \verb|gocc -no_lexer nolexer.bnf|

    After running gocc the directory structure contains the following files:
    \begin{verbatim}
> ls -R
errors		nolexer.bnf	parser		token
gen.sh		nolexer_test.go	scanner		util

./errors:
errors.go

./parser:
action.go		actiontable.go		gototable.go		parser.go		productionstable.go

./scanner:
scanner.go

./token:
token.go

./util:
litconv.go	rune.go
    \end{verbatim}

    Note the absence of a \verb|lexer| directory. Instead there is a hand-written scanner in 

    \verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/scanner/scanner.go|

    The scanner implements the interface:

    \lstinputlisting[numbers=left, linerange=89-91, firstnumber=89]{../example/nolexer/parser/parser.go}

    which is declared in the generated parser:

    \verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/parser/parser.go|.

    The scanner must recognise the tokens required by the parser generated from the BNF. They are declared in a \verb|TokenMap| in 

    \verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/token/token.go|:

    \lstinputlisting[numbers=left,linerange=59-75,firstnumber=59]{../example/nolexer/token/token.go}

    \verb|$| is the EOF symbol used by the parser.

    \verb|hello| and \verb|hiya| are string literals from the BNF.

    \verb|name| is any literal other than \verb|hello| and \verb|hiya|.

    Any lexeme not matching a valid terminal symbol of the grammar must be returned as \verb|INVALID|.

    \verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/scanner/scanner.go| implements a lexical analyser for this grammar as follows:

    \lstinputlisting[numbers=left]{../example/nolexer/scanner/scanner.go}

    Examples of using the generated parser with the hand-written scanner are given in 

    \verb|$GOPATH/src/code.google.com/p/gocc/example/nolexer/nolexer_test.go|

\section{gocc syntax}\label{sec:gocc syntax}
	A gocc source file contains UTF-8 encoded Unicode text.

	C-style block and line comments are allowed in the source code. All comments are suppressed by the lexer.
	In the following BNF \Code{!comment} means that \Code{comment} is a token, which is recognised and suppressed by the lexer -- it is not seen by the parser.

	\begin{Verbatim}[frame=single]
!comment : _lineComment | _blockComment ;

_lineComment : '/' '/' {.} '\n' ;

_blockComment : '/' '*' {. | '*'} '*' '/' ;
	\end{Verbatim}

	gocc source code consists of a sequence of tokens separated by white space. The white space characters are:

	\begin{tabular}{ccc}
		\bf Character & \bf Unicode value & \bf go char literal\\
		\hline
		space	& 0x20 & \verb|' '| \\
		horizontal tab & 0x09 & \verb|'\t'| \\
		newline & 0x0a & \verb|'\n'| \\
		carriage return & 0x0d & \verb|'\r'| \\
	\end{tabular}

	White space is suppressed by the lexer.

	\begin{Verbatim}[frame=single]
!whitespace : ' ' | '\t' | '\n' | '\r' ;
	\end{Verbatim}



	A gocc source file must contain at least one of a lexical part or a syntax part.

	\begin{Verbatim}[frame=single]
Grammar 
    : LexicalPart SyntaxPart
    | LexicalPart
    | SyntaxPart
    ;
\end{Verbatim}

The lexical part is a sequence of lexical productions.

\begin{Verbatim}[frame=single]
LexicalPart    
    : LexProductions
    ;
\end{Verbatim}

\begin{Verbatim}[frame=single]
LexProductions    
    : LexProduction
    | LexProductions LexProduction
    ;
\end{Verbatim}

Each lexical production is a token definition or a regular definition or an ignored token definition. Token identifiers start with a lower case character in the range 'a'-'z', regular definition identifiers start with '\_' and ignored token identifiers start with '!'. 

Regular definitions are used as building blocks for other regular definitions, token definitions and ignored token definitions. Regular definitions may not be mutually or self recursive.

Token definitions define the tokens that are recognised by the lexer and passed to the parser.

Ignored token definitions define tokens that are recognised by the lexer but suppressed so that the parser never sees them. Comments and the white space separating tokens are examples of ignored tokens.

\begin{Verbatim}[frame=single]
LexProduction 
    : tokId ":" LexPattern ";"         // token definition
    | regDefId ":" LexPattern ";"      // regular definition
    | ignoredTokId ":" LexPattern ";"  // ignored token definition
    ;
\end{Verbatim}

Token identifiers start with a lowercase letter in the range 'a'-'z'.

\begin{Verbatim}[frame=single]
tokId : _tokId ;

_tokId : _lowcase {_id_char} ;

_lowcase : 'a'-'z' ;

_id_char : _upcase | _lowcase | '_' | _digit ;

_upcase : 'A'-'Z' ;

_digit : '0'-'9' ;
\end{Verbatim}

Regular definition identifiers start with \verb|'_'|.

\begin{Verbatim}[frame=single]
regDefId : '_' {_id_char} ;
\end{Verbatim}

Ignored token identifiers start with \verb|'!'|.

\begin{Verbatim}[frame=single]
ignoredTokId : '!' _tokId ;
\end{Verbatim}

A lexical pattern is one or more alternatives, each consisting of a sequence of terms. 

\begin{Verbatim}[frame=single]
LexPattern     
    : LexAlt
    | LexPattern "|" LexAlt
    ;

LexAlt     
    : LexTerm
    | LexAlt LexTerm
    ;
\end{Verbatim}

A lexical term can be one of:

\begin{longtable}{lp{12cm}}
	\verb|.| & Match any UTF-8 rune. This match is only applied after all more specific terms have been matched. \\
	\verb|char_lit| & A specific character literal, e.g.: 'a'. \\
	\verb|char_lit "-" char_lit| & An inclusive range, e.g.: 'a'-'z' \\
	\verb|regDefId| & the identifier of a regular definition production in the BNF \\
	\verb|"[" LexPattern "]"| & An optional lexical pattern \\
	\verb|"{" LexPattern "}"| & Zero or more instances of a lexical pattern \\
	\verb|"(" LexPattern ")"| & A grouped lexical pattern, e.g.: \verb|('a' | 'b')| \\
\end{longtable}

\begin{Verbatim}[frame=single]
LexTerm 
    : "."
    | char_lit
    | char_lit "-" char_lit
    | regDefId
    | "[" LexPattern "]"
    | "{" LexPattern "}"
    | "(" LexPattern ")"
    ;
\end{Verbatim}

Character literals are specified as a valid Unicode or byte value, enclosed in single quotes:

\begin{Verbatim}[frame=single]
char_lit
    : '\'' (_unicode_value | _byte_value) '\'' 
    ;
\end{Verbatim}

A Unicode character may be specified as a character literal (e.g.: \verb|'a'|), a Unicode value or an escaped character.

\begin{Verbatim}[frame=single]
_unicode_value 
    : .                   // Any UTF-8 character literal
    | _little_u_value 
    | _big_u_value 
    | _escaped_char 
    ;

_byte_value       
    : _octal_byte_value 
    | _hex_byte_value 
    ;

_little_u_value   
    : '\\' 'u' _hex_digit _hex_digit _hex_digit _hex_digit 
    ;

_big_u_value
    : '\\' 'U' _hex_digit _hex_digit _hex_digit _hex_digit
               _hex_digit _hex_digit _hex_digit _hex_digit 
    ;

_escaped_char
    : '\\' ( 'a' | 'b' | 'f' | 'n' | 'r' | 't' | 'v' | '\\' | '\'' | '"' ) 
    ;

_octal_byte_value 
    : '\\' _octal_digit _octal_digit _octal_digit 
    ;

_hex_byte_value   
    : '\\' 'x' _hex_digit _hex_digit 
    ;

_octal_digit 
    : '0' - '7' 
    ;

_hex_digit 
    : '0' - '9' 
    | 'A' - 'F' 
    | 'a' - 'f'
    ;
\end{Verbatim}

The syntax part may start with an optional file header, followed by a sequence of syntax productions. The file header is textually included in the generated parser file, \verb|productionstable.go|, and is used to declare imports, constants, types and functions required by the action expressions of the productions.

\begin{Verbatim}[frame=single]
SyntaxPart
    : FileHeader SyntaxProdList
    | SyntaxProdList
    ;

FileHeader
    : action_lit
    ;

SyntaxProdList
    : SyntaxProduction
    | SyntaxProdList SyntaxProduction
    ;
\end{Verbatim}

Syntax productions define a context free language and may be mutually and self recursive. Left recursion is preferable to right recursion in LR grammar productions, as this leads to less stack activity during parsing. A syntax production starts with an identifier, followed by ":" and one or more alternative bodies, which are separated by "|".

Syntax production identifiers start with an upper case character in the range 'A'-'Z'.

\begin{Verbatim}[frame=single]
SyntaxProduction    
    : prodId ":" Alternatives ";"
    ;

prodId 
    : _upcase {_id_char} 
    ;

Alternatives    
    : SyntaxBody
    | Alternatives "|" SyntaxBody
    ;
\end{Verbatim}

A syntax body is a sequence of syntax symbols, optionally followed by an action literal. If a syntax body starts with the keyword, \verb|error|, it indicates that the parser may accept an error in the input of the production and continue parsing if it is able to recover from the error.

A syntax body may also be empty, indicated by the reserved word, \verb|empty|. This allows you to write productions with optional elements, such as:

\begin{verbatim}
A : B "c";
B : "b" | empty ;
\end{verbatim}

This grammar will match strings, \verb|b c| or \verb|c|.

\begin{Verbatim}[frame=single]
SyntaxBody
    : Symbols
    | Symbols action_lit
    | "error"
    | "error" Symbols
    | "error" Symbols action_lit
    | "empty"
    ;

Symbols    
    : Symbol
    | Symbols Symbol
    ;
\end{Verbatim}

An action literal is any string of characters enclosed in "<<" ">>". The contents of the action literal are invoked as an expression when the associated production body has been recognised by the parser. The action expression must return a tuple of type \verb| (interface{}, error)|. If the second parameter is not \verb|nil| the parser terminates and returns the error to its caller. If the second parameter is \verb|nil| the parser associates the first parameter with the recognised syntax production as its attribute.

\begin{Verbatim}[frame=single]
action_lit    
    : '<' '<' . {.} '>' '>' 
    ;
\end{Verbatim}

gocc recognises the following terminal syntax symbols:

\begin{longtable}{ll}
	\verb|prodId| & The identifier of a syntax production defined in the grammar. \\
	\verb|tokId| & The identifier of lexical production defined in the grammar. \\
	\verb|string_lit| & A string literal value, e.g.: \verb|"a string"| \\
\end{longtable}

\begin{Verbatim}[frame=single]
Symbol
    : prodId
    | tokId
    | string_lit
    ;
\end{Verbatim}

A string literal can be any valid go raw string (e.g.: \verb|`a raw string`|) or interpreted string (e.g.: \verb|"an interpreted string"|).

\begin{Verbatim}[frame=single]
string_lit 
    : _raw_string 
    | _interpreted_string 
    ;

_raw_string 
    : '`' {.} '`' 
    ;

_interpreted_string 
    : '"' { _unicode_value | _byte_value } '"' 
    ;
\end{Verbatim}




\nocite{Parsing, Modern Compiler Design, Dragon Book, ABNF}
\begin{thebibliography}{99}
	\bibitem{Parsing}
	Dick Grune and Ceriel J.H. Jacobs.
	\newblock {\em Parsing Techniques. A Practical Guide. Second Edition}.
	\newblock Monographs in Computer Science, Springer, 2008
	
	\bibitem{Modern Compiler Design}
	Dick Grune, Kees van Reeuwijk, Henri E. Bal, Ceriel J.H. Jacobs and Koen Langendoen.
	\newblock {\em Modern Compiler Design. Second Edition}.
	\newblock Springer 2012

	\bibitem{Dragon Book}
	Alfred V. Aho, Monica S. Lam, Ravi Sethi and Jeffrey D. Ullman.
	\newblock {\em Compilers. Principles, Techniques, \& Tools. Second Edition}.
	\newblock Addison Wesley, 2007

	\bibitem{ABNF}
	D. Crocker, Ed.
	\newblock{\em Augmented BNF for Syntax Specifications: ABNF}
	\newblock RFC 5234, January 2008

	\bibitem{gospec}
	{\em The Go Language Specification}
	\newblock \url{http://golang.org/ref/spec}

	
\end{thebibliography}


\end{document}
