/*
 * The following additions to the Array class make it implement the pushable
 * iterator interface (next/peek/pushArray).
 *
 * NOTE(review): extending native prototypes is generally discouraged, but it
 * is this file's deliberate design for treating plain arrays as iterators.
 */

/* Returns the element next() would yield (the last one) without removing
 * it; undefined for an empty array. */
Array.prototype.peek = function () {
    return this.slice(-1)[0];
};

/* Removes and returns the last element; undefined once empty. */
Array.prototype.next = function () {
    var item = this.pop();
    return item;
};

/* Appends every element of `items` to this array, in order. */
Array.prototype.pushArray = function (items) {
    for (var i = 0; i < items.length; ++i) {
        this.push(items[i]);
    }
};

// Stick everything in the "Gravy" namespace
var Gravy = new function () {

/* IterJoin
 *
 * A class that joins a bunch of iterators or single objects together. It
 * pulls everything it can from the first iterator and then moves onto the
 * next. If an entry is not an iterator, it is simply returned itself.
 */
this.IterJoin = function (iters) {
    // `iters` is consumed destructively as entries are exhausted.
    this.iters = iters;
};

/*
 * Returns the next value, or null once every entry has been consumed.
 */
this.IterJoin.prototype.next = function () {
    while (this.iters.length > 0) {
        var head = this.iters[0];
        // Anything object-like with a next() method counts as an iterator.
        if (typeof(head) == 'object' && typeof(head.next) == 'function') {
            var val = head.next();
            // Explicit null/undefined test so valid falsy values (0, '',
            // false) are not mistaken for exhaustion; this matches the
            // end-of-iteration convention used by LexerIter and
            // PushableIterator elsewhere in this file.
            if (val !== null && val !== undefined) {
                return val;
            }
            // Exhausted: drop it and move to the next entry.
            this.iters.shift();
        } else {
            // Not an iterator; yield the value itself.
            return this.iters.shift();
        }
    }
    // Oops, we ran out of iterators.
    return null;
};

/* StringStream
 *
 * A simple string-based stream with a current position, supporting regex
 * searching and matching. The internals (`text`, `pos`) are deliberately
 * exposed; the class is simple enough not to need an interface.
 */
this.StringStream = function (text) {
    this.text = text;
    this.pos = 0;
};

/* Searches the stream with the given regex, starting at the current
 * position (minus the optional `offset` characters — no bounds checking
 * is done, so be careful). On success the stream position advances to the
 * end of the match.
 *
 * NOTE: the regex's own lastIndex is overwritten, so the regex should use
 * the 'g' flag and callers must save its state themselves if they need it.
 */
this.StringStream.prototype.search = function (regex, offset) {
    regex.lastIndex = offset ? this.pos - offset : this.pos;
    var match = regex.exec(this.text);
    if (match) {
        this.pos = regex.lastIndex;
    }
    return match;
};

/* Attempts to match the regex exactly at the current position (minus the
 * optional `offset`). Returns the match and advances the stream on
 * success; returns null (leaving `pos` untouched) otherwise.
 *
 * See also StringStream.search.
 */
this.StringStream.prototype.match = function (regex, offset) {
    var start = offset ? this.pos - offset : this.pos;
    regex.lastIndex = start;
    var match = regex.exec(this.text);
    // exec() may find a hit further along in the text; only accept a match
    // whose span begins exactly where we asked.
    if (! match || regex.lastIndex != start + match[0].length) {
        return null;
    }
    this.pos = regex.lastIndex;
    return match;
};

/* CapturingIterator
 *
 * Wraps an iterator and records everything pulled off of it (most recent
 * first, in `queue`) so that, if needed, callers can push it all back for
 * an infinite "undo".
 */
var CapturingIterator = function (iterator) {
    this.iter = iterator;
    this.queue = [];
};

/* Pulls the next item from the wrapped iterator and records it. */
CapturingIterator.prototype.next = function () {
    var item = this.iter.next();
    // The most recently pulled item lives at queue[0].
    this.queue.unshift(item);
    return item;
};

/* Peeks at the wrapped iterator without recording anything. */
CapturingIterator.prototype.peek = function () {
    return this.iter.peek();
};

/* PushableIterator
 *
 * Wraps a normal iterator in a pushable iterator: items you did not want to
 * pull off can be shoved back on and will be returned again before the
 * underlying iterator is consulted.
 */
var PushableIterator = function (iterator) {
    this.iter = iterator;
    // Once the wrapped iterator yields null/undefined we latch `done` and
    // never pull from it again.
    this.done = false;
    // Pushed-back items; stack[0] is the next item to return.
    this.stack = [];
};

/*
 * Returns the next item: pushed-back items first, then the wrapped
 * iterator, then null forever once exhausted.
 */
PushableIterator.prototype.next = function () {
    if (this.stack.length > 0) {
        // Pull stuff from the stack first.
        return this.stack.shift();
    }
    if (this.done) {
        return null;
    }
    var item = this.iter.next();
    if (item === null || item === undefined) {
        this.done = true;
        return null;
    }
    return item;
};

/*
 * Returns the next item without consuming it, by pulling it from the
 * wrapped iterator (if needed) and pushing it back.
 */
PushableIterator.prototype.peek = function () {
    if (this.stack.length == 0) {
        this.push(this.iter.next());
    }
    return this.stack[0];
};

/* Pushes a single item back; it becomes the next item returned. */
PushableIterator.prototype.push = function (item) {
    this.stack.unshift(item);
};

/*
 * Pushes an array of items back. The items land on the stack in reverse
 * array order (items[items.length - 1] is returned first), matching
 * Array.prototype.pushArray's push/pop semantics.
 *
 * Fixed: operate on a copy so the caller's array is no longer reversed in
 * place as a side effect.
 */
PushableIterator.prototype.pushArray = function (items) {
    var reversed = items.slice().reverse();
    this.stack.unshift.apply(this.stack, reversed);
};

/* BidirectionalMap
 *
 * Provides a bidirectional map similar to an object (without the nice
 * semantics). Entries are stored as "left" and "right".
 */
var BidirectionalMap = function (map) {
    // The given object is kept (not copied) as the left-to-right view.
    this.left = map;
    this.right = {};
    // `var` keeps the loop variable local; it previously leaked into the
    // global scope.
    for (var key in map) {
        this.right[map[key]] = key;
    }
};

/* Adds a left/right pair to both views. */
BidirectionalMap.prototype.add = function (left, right) {
    this.left[left] = right;
    this.right[right] = left;
};

/*
 * Get the right associated with the given left
 */
BidirectionalMap.prototype.getByLeft = function (left) {
    return this.left[left];
};

/*
 * Get the left associated with the given right
 */
BidirectionalMap.prototype.getByRight = function (right) {
    return this.right[right];
};

/* LexerRule
 *
 * Implements a single rule for the lexer. A rule is made up of a regular
 * expression and an associated callback.
 *
 * Due to JavaScript's inability to support zero-width lookbehinds, this
 * class fakes them. If you need a lookbehind, supply everything you would
 * put inside the (? ) (including the '=' or '!') as well as the search
 * width of the lookbehind (they're fixed-width in Perl too), and
 * _build_regex will do a little magic to emulate it.
 */
this.LexerRule = function (regex, callback, offset, lookback) {
    this.regex = regex;
    this.callback = callback;
    // Lookbehind emulation needs both the lookback expression and its
    // fixed width; accept them only as a pair.
    if (offset && lookback) {
        this.offset = offset;
        this.lookback = lookback;
    }
};

/*
 * Builds the regexes for this rule: an array with one expression per
 * lookback offset, from 0 up to max_offset. Entries below this.offset are
 * left undefined for positive lookbacks, which cannot match that close to
 * the start of the stream.
 */
this.LexerRule.prototype._build_regex = function (max_offset) {
    // `var` keeps these local; they previously leaked into global scope.
    var rules = new Array(max_offset + 1);
    var i;
    if (this.offset) {
        // If we have offset, we have lookback too.
        if (this.lookback[0] == '!') {
            // A negative lookback trivially holds near the start of the
            // stream, so offsets below this.offset match directly.
            rules[0] = '(' + this.regex + ')';
            for (i = 1; i < this.offset; ++i) {
                rules[i] = '(?:.|\\n)' + rules[i - 1];
            }
        }
        // Base rule: fake the lookbehind with a lookahead of the lookback
        // expression followed by `offset` throwaway characters.
        rules[this.offset] = '(?' + this.lookback + ')(?:.|\\n){'
                + String(this.offset) + '}(' + this.regex + ')';
        // Pad once per additional offset.
        for (i = this.offset + 1; i <= max_offset; ++i) {
            rules[i] = '(?:.|\\n)' + rules[i - 1];
        }
    } else {
        // No lookbehind: same expression, padded once per extra offset.
        rules[0] = '(' + this.regex + ')';
        for (i = 1; i <= max_offset; ++i) {
            rules[i] = '(?:.|\\n)' + rules[i - 1];
        }
    }
    return rules;
};

/* Lexer
 *
 * Implements a lexer. The lexer takes a list of LexerRules, an optional
 * 'flags' argument applied to the lexing regular expressions (really, only
 * 'm' is supported), and an optional fallback function that is called when
 * nothing matches. With no fallback, a failed match throws.
 */
this.Lexer = function (rules, flags, fallback) {
    // Set up a few variables; note that we always have a 'g' flag (the
    // StringStream relies on lastIndex).
    if (flags) {
        this.flags = 'g' + flags;
    } else {
        this.flags = 'g';
    }
    this.rules = rules;
    this.fallback = fallback;

    // Calculate the max lookback offset over all rules. `var` keeps the
    // loop counters local; they previously leaked into the global scope.
    this.max_offset = 0;
    for (var i = 0; i < rules.length; ++i) {
        if (this.max_offset < this.rules[i].offset) {
            this.max_offset = this.rules[i].offset;
        }
    }

    // Create and initialize the per-offset regex and callback arrays.
    this.regex = new Array(this.max_offset + 1);
    this.callbacks = new Array(this.max_offset + 1);
    for (var j = 0; j <= this.max_offset; ++j) {
        this.regex[j] = '';
        this.callbacks[j] = [];
    }
    // Join each rule's per-offset regexes into the combined expressions.
    for (i = 0; i < this.rules.length; ++i) {
        var rule_regex = this.rules[i]._build_regex(this.max_offset);
        for (j = 0; j <= this.max_offset; ++j) {
            if (rule_regex[j]) {
                this.regex[j] += '|' + rule_regex[j];
                this.callbacks[j].push(this.rules[i].callback);
            }
        }
    }
    // Finish up and compile (sorry opera)
    for (j = 0; j <= this.max_offset; ++j) {
        if (this.regex[j] == '') {
            this.regex[j] = null;
        } else {
            // Get rid of the leading '|'
            this.regex[j] = this.regex[j].substr(1);
            this.regex[j] = RegExp(this.regex[j], this.flags);
            // NOTE(review): RegExp.prototype.compile is deprecated; kept
            // for the legacy engines this code targets.
            this.regex[j].compile(this.regex[j]);
        }
    }
};

/*
 * This does the actual lexing magic: matches the combined regex for the
 * current lookback offset against the stream and dispatches to the
 * matching rule's callback.
 *
 * Returns whatever the callback returns (a single token, or an iterator
 * of tokens -- see LexerIter.next), null at end of stream, or the
 * fallback's result; throws a string on a syntax error when no fallback
 * was supplied.
 */
this.Lexer.prototype._next_token_iter = function(stream) {
    // End of stream: nothing left to lex.
    if (stream.pos >= stream.text.length) {
        return null;
    }
    // Calculate the actual offset based on the max offset and the stream
    // position (we cannot look back past the start of the text).
    var offset = stream.pos < this.max_offset ? stream.pos : this.max_offset;

    // Attempt to match; regex[offset] is null when no rule applies at
    // this offset.
    var match;
    if (this.regex[offset]) {
        match = stream.match(this.regex[offset], offset);
    }

    // Each rule contributed one capture group, in the same order as
    // callbacks[offset]; the first defined group identifies the matching
    // rule. (Assumes rule regexes contain no extra capture groups of
    // their own -- TODO confirm.)
    if (match) {
        for (var i = 1; i < match.length; ++i) {
            if (! (match[i] == undefined || match[i] == null)) {
                return this.callbacks[offset][i - 1](match[i], stream);
            }
        }
    } else if (this.fallback != undefined) {
        // Nothing matched; let the fallback produce something.
        return this.fallback(stream);
    }
    // Failed to match. NOTE(review): throwing a string rather than an
    // Error object is preserved for compatibility with existing callers.
    throw "Syntax Error at " + String(stream.pos);
};

/* Returns an iterator of tokens for the given stream. */
this.Lexer.prototype.tokenize = function (stream) {
    var iter = new LexerIter(this, stream);
    return iter;
};

/* LexerIter
 *
 * An iterator that repeatedly asks a lexer for tokens from a stream.
 */
var LexerIter = function (lexer, stream) {
    this.lexer = lexer;
    this.stream = stream;
    // The value most recently produced by the lexer: either an iterator
    // we are still draining, or null when a fresh one is needed.
    this._iter = null;
    this.done = false;
};

/*
 * Returns the next token, or null once the lexer is exhausted. The lexer
 * may hand back either an iterator (which is drained here) or a single
 * value (which is returned directly).
 */
LexerIter.prototype.next = function () {
    while (! this.done) {
        if (this._iter == null) {
            // No active iterator; ask the lexer for the next one.
            this._iter = this.lexer._next_token_iter(this.stream);
            if (this._iter == null) {   // matches both null and undefined
                this.done = true;
                return null;
            }
        }
        var isIterator = typeof(this._iter) == 'object' &&
                typeof(this._iter.next) == 'function';
        if (! isIterator) {
            // A plain value: hand it over and forget it.
            var value = this._iter;
            this._iter = null;
            return value;
        }
        var val = this._iter.next();
        if (val == null) {
            // This iterator is spent; loop around for the next one.
            this._iter = null;
        } else {
            return val;
        }
    }
    return null;
};

/* LexerStack
 *
 * Provides a concept of a stack of lexers. Acts just like a lexer, but
 * tokenizes with whichever lexer is on top of the stack, falling back to
 * the base lexer when the stack is empty.
 */
this.LexerStack = function (base) {
    this._stream = null;
    this.base = base;
    this._iter = null;        // iterator over the base lexer
    this.stack = [];          // pushed lexers, stack[0] on top
    this._iterStack = [];     // iterators parallel to `stack`
};

/*
 * Resets the stack back to empty. Please don't call this unless you need to.
 */
this.LexerStack.prototype.clear = function () {
    this.stack = [];
    // Also drop the parallel iterator stack; previously it was left
    // populated, letting the two stacks fall out of sync.
    this._iterStack = [];
};

/* Makes `lexer` the active lexer until it is popped. */
this.LexerStack.prototype.pushLexer = function (lexer) {
    this.stack.unshift(lexer);
    this._iterStack.unshift(new LexerIter(lexer, this._stream));
};

// If you call this inside a lexer tokenize or iteration call, you MUST
// return something otherwise things will break.
this.LexerStack.prototype.popLexer = function () {
    this._iterStack.shift();
    return this.stack.shift();
};

/*
 * Gets the next token from the stack of lexers. This is where all the
 * magic is done.
 */
this.LexerStack.prototype._nextToken = function () {
    var token;
    if (this.stack.length > 0) {
        token = this._iterStack[0].next();
        // The iterator may be left over from a previous stream; rebuild
        // it against the current stream and retry.
        if ((token === null || token === undefined) &&
                (this._iterStack[0].stream != this._stream)) {
            this._iterStack[0] = new LexerIter(this.stack[0], this._stream);
            token = this._iterStack[0].next();
        }
    } else {
        token = this._iter.next();
        // Same stale-stream handling for the base lexer's iterator.
        if ((token === null || token === undefined) &&
                (this._iter.stream != this._stream)) {
            this._iter = new LexerIter(this.base, this._stream);
            // Fixed: _iter is a LexerIter, not an array; the old code
            // indexed it with [0] here and would have thrown.
            token = this._iter.next();
        }
    }
    return token;
};

/* Starts tokenizing a new stream; returns a minimal iterator object. */
this.LexerStack.prototype.tokenize = function (stream) {
    this._stream = stream;
    this._iter = new LexerIter(this.base, stream);
    var lexerStack = this;
    // A lightweight anonymous iterator; a full class isn't warranted here.
    return {next: function () {
        return lexerStack._nextToken();
    }};
};

/* Token
 *
 * Implements a token. The type doubles as the text when no text is given
 * (handy for single-symbol tokens); toString() gives a readable dump for
 * debugging purposes.
 */
this.Token = function (type, text) {
    this.type = type;
    if (text === undefined) {
        this.text = type;
    } else {
        this.text = text;
    }
};

/*
 * Stringifies the token, listing every non-function enumerable property.
 */
this.Token.prototype.toString = function () {
    var str = '<Token: ';
    var vals = [];
    // `var` keeps the loop variable local; it previously leaked into the
    // global scope.
    for (var val in this) {
        if (typeof(this[val]) != 'function') {
            vals.push(String(val) + ': "' + String(this[val]) + '"');
        }
    }
    str += vals.join(', ');
    return str + '>';
};

/* Tokenizer
 *
 * Wires together the three lexers (block, inline, reference-value) and the
 * lexer stack used to tokenize Gravy markup. The rule callbacks close over
 * `tokenizer` so they can push and pop lexers as the input changes context.
 */
this.Tokenizer = function () {
    var tokenizer = this;

    // Small factories that turn matched text into typed tokens.
    var tokenize_delimiter = function (match) {
        return new Gravy.Token('delimiter', match);
    };
    var tokenize_text = function (match) {
        return new Gravy.Token('text', match);
    };
    var tokenize_special_char = function (match) {
        return new Gravy.Token('special character', match);
    };
    var tokenize_whitespace = function (match) {
        return new Gravy.Token('whitespace', match);
    };
    var tokenize_symbol = function (match) {
        return new Gravy.Token(match);
    };
    var tokenize_escape = function (match) {
        return new Gravy.Token('escape', match);
    };

    // Lexer used inside a reference value; active until end of line.
    this.ref_val_lexer = new Gravy.Lexer([
        // Escape Character
        new Gravy.LexerRule('\\\\(?:.|\\n)', tokenize_escape),
        // Line Break: pop back to the enclosing lexer.
        new Gravy.LexerRule('\\n', function (match) {
            tokenizer.lexer_stack.popLexer();
            return tokenize_symbol(match);
        }),
        // Double-Quote
        new Gravy.LexerRule('"', tokenize_delimiter),
        // Equals
        new Gravy.LexerRule('=', tokenize_symbol),
        // Comma
        new Gravy.LexerRule(',', tokenize_symbol),
        // Whitespace
        new Gravy.LexerRule('\\s', tokenize_whitespace),
        // Text
        new Gravy.LexerRule('[\\w-]', tokenize_text),
        new Gravy.LexerRule('.', tokenize_special_char)
    ]);

    // Lexer for inline content (paragraphs, headers, list items...).
    this.inline_lexer = new Gravy.Lexer([
        // Escape Character
        new Gravy.LexerRule('\\\\(?:.|\\n)', tokenize_escape),
        // Line Break: pop back to the block lexer.
        new Gravy.LexerRule('\\n', function (match) {
            tokenizer.lexer_stack.popLexer();
            return tokenize_symbol(match);
        }),
        // Double-quote
        new Gravy.LexerRule('"', tokenize_delimiter),
        // Equals (used for extensions)
        new Gravy.LexerRule('=', tokenize_symbol),
        // Comma (used for extensions)
        new Gravy.LexerRule(',', tokenize_symbol),
        // En dash:
        new Gravy.LexerRule('--(?!-)', tokenize_symbol, 1, '!-'),
        // Em dash:
        new Gravy.LexerRule('---(?!-)', tokenize_symbol, 1, '!-'),
        // Ellipsis:
        new Gravy.LexerRule('\\.\\.\\.(?!\\.)', tokenize_symbol, 1, '!\\.'),
        // Non-breaking space
        new Gravy.LexerRule('~(?=\\S)', tokenize_symbol, 1, '=\\S'),
        // Strong delimiter
        new Gravy.LexerRule('\\*', tokenize_delimiter),
        // Emphasis delimiter
        new Gravy.LexerRule('_', tokenize_delimiter),
        // Code delimiter
        new Gravy.LexerRule('`', tokenize_delimiter),
        // Extension delimiter
        new Gravy.LexerRule('\\[', tokenize_delimiter),
        new Gravy.LexerRule('\\]', tokenize_delimiter),
        // Whitespace
        new Gravy.LexerRule('\\s', tokenize_whitespace),
        // Text
        new Gravy.LexerRule('[\\w-]', tokenize_text),
        new Gravy.LexerRule('.', tokenize_special_char)
    ]);

    // Helper regexes used by the block-level callbacks below. The
    // (deprecated) compile() calls are kept for the legacy engines this
    // code targets.
    var header_re = /^\s*(=+)\s*$/;
    header_re.compile(header_re);
    var indent_re = /^\n?(\s*)\S/;
    indent_re.compile(indent_re);
    var ref_val_re = /^\s+\[([\w\-]+)\]([\w\-]*):/;
    ref_val_re.compile(ref_val_re);

    // Lexer for block-level structure (headers, lists, quotes, refs).
    this.block_lexer = new Gravy.Lexer([
        // Line Break
        new Gravy.LexerRule('\\n', tokenize_symbol),
        // Header
        new Gravy.LexerRule('\\s*=+\\s*(?=[^=\\s]|$)', function (match) {
            var m = header_re.exec(match);
            var level = m[1];
            var header_token = new Gravy.Token('header', level);
            header_token.level = level.length;
            tokenizer.lexer_stack.pushLexer(tokenizer.inline_lexer);
            return header_token;
        }),
        // Ordered Item
        new Gravy.LexerRule('\\s*\\d*\\.\\s+', function (match) {
            // `var` keeps these local; they previously leaked into the
            // global scope.
            var m = match.match(indent_re);
            var list_token = new Gravy.Token('ordered item', match);
            list_token.level = m[1].length;
            tokenizer.lexer_stack.pushLexer(tokenizer.inline_lexer);
            return list_token;
        }),
        // Unordered Item
        new Gravy.LexerRule('\\s*\\*\\s+', function (match) {
            var m = match.match(indent_re);
            var list_token = new Gravy.Token('unordered item', match);
            list_token.level = m[1].length;
            tokenizer.lexer_stack.pushLexer(tokenizer.inline_lexer);
            return list_token;
        }),
        // Block Quote
        new Gravy.LexerRule('[ \\t]*>[ \\t]*(?=[ \\t\\n])', function (match) {
            return new Gravy.Token('block quote', match);
        }),
        // Reference Values
        new Gravy.LexerRule('\\s+\\[[\\w\\-]+\\][\\w\\-]*:', function (match) {
            var m = ref_val_re.exec(match);
            var ref_token = new Gravy.Token('reference',
                    '[' + m[1] + ']' + m[2] + ':');
            ref_token.id = m[1];
            ref_token.callName = m[2];
            tokenizer.lexer_stack.pushLexer(tokenizer.ref_val_lexer);
            return ref_token;
        }),
        // Fallback: anything else is treated as inline text.
    ], 'm', function () {
        tokenizer.lexer_stack.pushLexer(tokenizer.inline_lexer);
        return tokenizer.lexer_stack._nextToken();
    });

    this.lexer_stack = new Gravy.LexerStack(this.block_lexer);
};

/*
 * Tokenizes a string of Gravy markup, returning an iterator of tokens
 * with adjacent text tokens coalesced.
 */
this.Tokenizer.prototype.tokenize = function (data) {
    var stream = new Gravy.StringStream(data);
    return new TokenizerIter(this.lexer_stack.tokenize(stream));
};

/* TokenizerIter
 *
 * Wraps a lexer iterator and coalesces consecutive 'text' tokens into a
 * single text token.
 */
var TokenizerIter = function (lexerIter) {
    this.iterStack = [lexerIter];
    this.text = '';   // buffered text not yet emitted
};

/* Returns the next (possibly merged) token, or null at the end. */
TokenizerIter.prototype.next = function () {
    // Drain iterators front-to-back, buffering text as we go.
    while (this.iterStack[0] != undefined) {
        var token = this.iterStack[0].next();
        if (token === null) {
            // This iterator is exhausted; drop it.
            this.iterStack.shift();
        } else if (token.type == 'text') {
            // Accumulate text instead of emitting it immediately.
            this.text += token.text;
        } else if (this.text != '') {
            // A non-text token arrived while text was pending: emit the
            // buffered text first and requeue the token for next time.
            var buffered = this.text;
            this.text = '';
            this.iterStack.unshift(new Gravy.IterJoin([token]));
            return new Gravy.Token('text', buffered);
        } else {
            return token;
        }
    }
    // Flush any trailing buffered text.
    if (this.text != '') {
        var buffered = this.text;
        this.text = '';
        return new Gravy.Token('text', buffered);
    }
    return null;
};

/* GravyDomGenerator
 *
 * A DOM generator that produces plain JavaScript objects rather than
 * browser DOM nodes.
 */
this.GravyDomGenerator = function () {
};

/*
 * Creates a node of the given type, copying any `params` onto it. Every
 * node except 'text' gets an empty children array.
 */
this.GravyDomGenerator.prototype.create = function (type, params) {
    var obj = {};
    if (params !== undefined) {
        // `var` keeps the loop variable local; it previously leaked into
        // the global scope.
        for (var key in params) {
            obj[key] = params[key];
        }
    }
    obj.parentNode = null;
    obj.type = type;
    if (obj.type != 'text') {
        obj.children = [];
    }
    return obj;
};

/* Appends `child` to `par` and records the parent link. */
this.GravyDomGenerator.prototype.appendChild = function (par, child) {
    par.children.push(child);
    child.parentNode = par;
};

/*
 * Appends text to `par`, extending a trailing text child when one exists
 * instead of creating a new one.
 */
this.GravyDomGenerator.prototype.appendText = function (par, text) {
    var kids = par.children;
    var last = kids[kids.length - 1];
    if (last && last.type == 'text') {
        last.text += text;
    } else {
        kids.push({type: 'text', text: text});
    }
};

/*
 * Walks up the parent chain looking for the nearest ancestor with the
 * given type; returns null when there is none.
 */
this.GravyDomGenerator.prototype.findParentByType = function (node, type) {
    var cur = node.parentNode;
    while (cur != null) {
        if (cur.type == type) {
            return cur;
        }
        // Fixed: advance up the chain; the old loop never moved `cur` and
        // spun forever whenever the first parent did not match.
        cur = cur.parentNode;
    }
    return null;
};

/* Returns the generator-level type of a node. */
this.GravyDomGenerator.prototype.getNodeType = function (node) {
    return node.type;
};

/* HTMLDomGenerator
 *
 * A DOM generator that builds real HTML elements under the given root
 * node (browser only; uses `document`).
 */
this.HTMLDomGenerator = function (root) {
    this.root = root;
};

// Maps Gravy node types (left) to HTML tag names (right).
this.HTMLDomGenerator._dom_map = new BidirectionalMap({
    'strong': 'strong',
    'emphasized': 'em',
    'paragraph': 'p',
    'block quote': 'blockquote',
    'unordered list': 'ul',
    'ordered list': 'ol',
    'list item': 'li',
    'quote': 'q'
});

// Maps symbol node types to the literal characters they render as.
this.HTMLDomGenerator._symbol_map = {
    'em dash': '\u2014',
    'en dash': '\u2013',
    'ellipsis': '\u2026',
    // Fixed: '\u00A0' replaces the legacy octal escape '\240' (the same
    // character, but octal escapes are illegal in strict-mode code).
    'nbsp': '\u00A0'
};

/*
 * Creates an HTML node for the given Gravy node type. `params.level`
 * selects the header level. Symbol types return a plain string (which
 * appendChild routes through appendText). Returns undefined for unknown
 * types.
 */
this.HTMLDomGenerator.prototype.create = function (type, params) {
    if (type == 'root') {
        // Clear out the root so re-rendering starts from scratch.
        while (this.root.lastChild) {
            this.root.removeChild(this.root.lastChild);
        }
        return this.root;
    } else if (type == 'header') {
        return document.createElement('h' + String(params.level));
    } else if (type == 'code') {
        // Code renders as <code><pre>...</pre></code>; appendChild
        // redirects children into the inner element.
        var code_elem = document.createElement('code');
        code_elem.appendChild(document.createElement('pre'));
        return code_elem;
    } else if (type == 'latex block') {
        // MathJax-style script tag for display math.
        var latex_elem = document.createElement('script');
        latex_elem.setAttribute('type', 'math/tex; mode=display');
        return latex_elem;
    } else if (type == 'latex inline') {
        // MathJax-style script tag for inline math.
        var latex_elem = document.createElement('script');
        latex_elem.setAttribute('type', 'math/tex');
        return latex_elem;
    } else if (Gravy.HTMLDomGenerator._dom_map.getByLeft(type)) {
        // Simple one-tag node types go through the tag-name map.
        return document.createElement(
                Gravy.HTMLDomGenerator._dom_map.getByLeft(type));
    } else if (Gravy.HTMLDomGenerator._symbol_map[type]) {
        // Symbols are returned as raw strings, not elements.
        return Gravy.HTMLDomGenerator._symbol_map[type];
    }
};

/*
 * Appends a child to an element. Strings are appended as text; for a
 * <code> element the child is routed into its inner <pre>.
 */
this.HTMLDomGenerator.prototype.appendChild = function (par, child) {
    if (par.tagName.toLowerCase() == 'code') {
        this.appendChild(par.firstChild, child);
        // Fixed: stop here. Without this return the child was appended to
        // the inner <pre> and then immediately moved onto the <code>
        // element itself, undoing the redirect.
        return;
    }
    if (typeof(child) == 'string') {
        this.appendText(par, child);
    } else {
        par.appendChild(child);
    }
};

/*
 * Appends text to an element, extending a trailing text node (nodeType 3)
 * when present instead of creating a new one.
 */
this.HTMLDomGenerator.prototype.appendText = function (par, text) {
    var last = par.lastChild;
    if (last && last.nodeType == 3) {
        last.appendData(text);
    } else {
        par.appendChild(document.createTextNode(text));
    }
};

/*
 * Walks up the parent chain looking for the nearest element whose tag
 * corresponds to the given Gravy type; returns null when there is none.
 */
this.HTMLDomGenerator.prototype.findParentByType = function (node, type) {
    var html_type = Gravy.HTMLDomGenerator._dom_map.getByLeft(type);
    var cur = node.parentNode;
    while (cur != null) {
        if (cur.nodeType == 1 && cur.tagName.toLowerCase() == html_type) {
            // Fixed: return the matching ancestor itself. The old code
            // returned `node`, tested a `cur` that was never advanced
            // (looping forever), and walked `node` up instead.
            return cur;
        }
        cur = cur.parentNode;
    }
    return null;
};

/*
 * Maps an HTML element back to its Gravy node type. Headers, code and the
 * MathJax script tags are special-cased; everything else goes through the
 * tag-name map. Unknown script types and unmapped tags yield undefined.
 */
this.HTMLDomGenerator.prototype.getNodeType = function (node) {
    var html_type = node.tagName.toLowerCase();
    if (/h[1-9]/.test(html_type)) {
        return 'header';
    }
    if (html_type == 'code') {
        return 'code';
    }
    if (html_type != 'script') {
        return Gravy.HTMLDomGenerator._dom_map.getByRight(html_type);
    }
    var script_type = node.getAttribute('type');
    if (script_type == 'math/tex') {
        return 'latex inline';
    }
    if (script_type == 'math/tex; mode=display') {
        return 'latex block';
    }
};

/* Parser
 *
 * Parses a token stream into a DOM via the supplied generator.
 */
this.Parser = function (domGenerator) {
    this.generator = domGenerator;
    this._testing = false;
};

// Token types that may legally appear inside inline content.
// Fixed: the ',' key was listed twice.
this.Parser._inline_types = {
    '"': true,
    '=': true,
    ',': true,
    '---': true,
    '--': true,
    '...': true,
    '~': true,
    'delimiter': true,
    'whitespace': true,
    'text': true,
    'escape': true,
    'special character': true
};

// Maps inline delimiter characters to the node type they open/close.
this.Parser._delimiter_map = {
    '*': 'strong',
    '_': 'emphasized',
    '`': 'code',
    '"': 'quote'
};

/*
 * Parses the whole token stream: reference values first, then the block
 * structure. Returns the root node produced by the generator.
 */
this.Parser.prototype.parse = function (tokenIter) {
    var root = this.generator.create('root');
    var tokens = this._parseReferenceValues(new PushableIterator(tokenIter));
    this._parseBlocks(root, tokens);
    return root;
};

/*
 * Parses inline content (text, delimiters, symbols, escapes) into `root`
 * until a block-level token, a matching closing delimiter, or the end of
 * the stream is reached. `tokens` must be a PushableIterator.
 *
 * NOTE(review): log_msg is not defined in this file; presumably a global
 * logging helper defined elsewhere -- confirm.
 */
this.Parser.prototype._parseInline = function (root, tokens) {
    while (true) {
        var token = tokens.next();
        if (token === null || token === undefined) {
            break;
        } else if (token.type == 'delimiter') {
            var node_type = Gravy.Parser._delimiter_map[token.text];
            if (node_type == 'code') {
                // Code spans collect raw text only; no nested markup.
                var new_node = this.generator.create('code');
                while (true) {
                    token = tokens.next();
                    if (token === null ||
                            (! Gravy.Parser._inline_types[token.type])) {
                        // Hit end-of-stream or a block-level token before
                        // the closing backquote.
                        log_msg("Unterminated delimiter: code");
                        tokens.push(token);
                        break;
                    } else if (token.type == 'delimiter' && token.text == '`') {
                        break;
                    } else {
                        this.generator.appendText(new_node, token.text);
                    }
                }
                this.generator.appendChild(root, new_node);
            } else if (token.text == '[') {
                // Possible extension call; capture the tokens so they can
                // be replayed if it turns out not to be one.
                var iter = new CapturingIterator(tokens);
                var new_node = this._parseExtensionCall(iter);
                if (new_node) {
                    this.generator.appendChild(root, new_node);
                } else {
                    this.generator.appendText(root, '[');
                    tokens.pushArray(iter.queue);
                }
            } else if (token.text == ']') {
                // This is really a dummy, but we should call it a delimiter
                this.generator.appendText(root, ']');
            } else if (node_type) {
                if (this.generator.getNodeType(root) == node_type) {
                    // Closing delimiter for the node we are filling.
                    break;
                } else if (this.generator.findParentByType(root, node_type)) {
                    // An ancestor opened this delimiter; treat the current
                    // node as unterminated and let the ancestor close it.
                    tokens.push(token);
                    log_msg("Unterminated delimiter: " + node_type);
                    break;
                }

                // Opening delimiter: recurse to fill the new node.
                var new_node = this.generator.create(node_type);
                this._parseInline(new_node, tokens);
                this.generator.appendChild(root, new_node);
            } else {
                // I don't know what to do with it
                this.generator.appendText(root, token.text);
            }
        } else if (token.type == '---') {
            this.generator.appendChild(root, this.generator.create('em dash'));
        } else if (token.type == '--') {
            this.generator.appendChild(root, this.generator.create('en dash'));
        } else if (token.type == '...') {
            this.generator.appendChild(root, this.generator.create('ellipsis'));
        } else if (token.type == '~') {
            this.generator.appendChild(root, this.generator.create('nbsp'));
        } else if (token.type == ',' || token.type == '=') {
            this.generator.appendText(root, token.text);
        } else if (token.type == 'text' || token.type == 'whitespace' ||
                token.type == 'special character') {
            this.generator.appendText(root, token.text);
        } else if (token.type == 'escape') {
            if (token.text[1] == '\n') {
                // An escaped newline swallows all following whitespace.
                while (tokens.peek() && tokens.peek().type == 'whitespace') {
                    tokens.next();
                }
            } else {
                // Emit the escaped character itself.
                this.generator.appendText(root, token.text[1]);
            }
        } else if (token.type == '\n') {
            this.generator.appendText(root, token.text);
            if (tokens.peek() &&
                    Gravy.Parser._inline_types[tokens.peek().type]) {
                // The next line continues this inline run.
                continue;
            } else {
                return;
            }
        } else {
            // Block-level token: put it back for the block parser.
            tokens.push(token);
            return;
        }
    }
};

/* ExtensionArgParser
 *
 * Accumulates the tokens of an extension call into positional and named
 * arguments (see handleToken).
 */
var ExtensionArgParser = function () {
    this.args = [];          // positional arguments
    this.namedArgs = {};     // name=value arguments
    this.inQuotes = false;   // currently inside a quoted string?
    this.specials = false;   // current value contained special characters
    this.name = null;        // pending argument name (set at '=')
    this.text = '';          // value text accumulated so far
    this.ws = '';            // buffered interior whitespace
};

/*
 * Feed one token into the argument-parsing state machine.
 *
 * Text accumulates in this.text; whitespace is buffered in this.ws and only
 * flushed when more text follows, so trailing whitespace never ends up in an
 * argument value. Returns true if there was an error (the token cannot
 * legally appear inside an argument list), false otherwise.
 *
 * NOTE: branch order matters — the inQuotes branch must come before the
 * '=' and ',' branches so quoted delimiters are treated as plain text.
 */
ExtensionArgParser.prototype.handleToken = function (token) {
    if (! Gravy.Parser._inline_types[token.type]) {
        // Only inline tokens may appear in an argument list.
        // This is an error
        return true;
    } else if (token.type == 'delimiter' && token.text == '"') {
        // An unescaped quote toggles quoted mode; the quote itself is not
        // part of the argument text.
        this.specials = true;
        this.inQuotes = ! this.inQuotes;
    } else if (token.type == 'escape') {
        // token.text is a two-character '\' + char sequence; an escaped
        // newline is a line continuation and produces nothing.
        if (token.text[1] != '\n') {
            this.specials = true;
            if (token.text[1] == '"' || token.text[1] == '\\' ) {
                // \" and \\ always yield the bare character.
                this.text += this.ws + token.text[1];
            } else if (! this.inQuotes &&
                    (token.text[1] == ',' ||
                    token.text[1] == '=' ||
                    token.text[1] == ']')) {
                // Outside quotes these would be structural, so escaping
                // them yields the bare character.
                this.text += this.ws + token.text[1];
            } else {
                // Any other escape is kept verbatim, backslash included.
                this.text += this.ws + token.text;
            }
            this.ws = '';
        }
    } else if (this.inQuotes) {
        // Inside quotes every token is literal text.
        this.text += this.ws + token.text;
        this.ws = '';
    } else if (token.type == '=') {
        if (this.specials) {
            // Special characters aren't allowed in argument names
            return true;
        }
        // What we collected so far was the argument's name, not its value.
        this.name = this.text;
        this.text = '';
        this.ws = '';
    } else if (token.type == ',') {
        // End of the current argument: file it as positional or named.
        if (this.name === null) {
            this.args.push(this.text)
        } else {
            this.namedArgs[this.name] = this.text;
        }
        // Reset for the next argument
        this.specials = false;
        this.name = null;
        this.text = '';
        this.ws = '';
    } else if (token.type == 'whitespace') {
        // Buffer it; leading whitespace (empty text so far) is dropped.
        if (this.text != '') {
            this.ws += token.text;
        }
    } else if (token.type == 'text') {
        this.text += this.ws + token.text;
        this.ws = '';
    } else {
        // We treat this as text.
        this.text += this.ws + token.text;
        this.ws = '';
        this.specials = true;
    }
    return false;
}

/*
 * Flush the argument still being accumulated once the argument list ends.
 * An empty text buffer means nothing is pending, so a trailing comma or an
 * empty list adds no argument.
 */
ExtensionArgParser.prototype.finalize = function () {
    if (this.text == '') {
        return;
    }
    if (this.name === null) {
        this.args.push(this.text);
    } else {
        this.namedArgs[this.name] = this.text;
    }
};

/*
 * Parse an extension call. `tokens` is positioned just after an opening
 * '['. Handles both the short form "[name]" and the full form
 * "[args][name]". Returns the generated node, or null if the input does
 * not form a valid extension call.
 */
this.Parser.prototype._parseExtensionCall = function (tokens) {
    var token = tokens.next();

    // Handle the case of "[macro]"
    if (token && token.type == 'text' &&
            tokens.peek() &&
            tokens.peek().type == 'delimiter' &&
            tokens.peek().text == ']' &&
            this._isExtension(token.text)) {
        tokens.next();
        return this._callReference(token.text, [], {});
    }

    var argParser = new ExtensionArgParser();

    // Feed tokens to the argument parser until the closing ']'. A ']'
    // inside quotes does not close the list. BUGFIX: also stop when the
    // stream runs out while still inside quotes, instead of passing
    // undefined to handleToken() and crashing.
    while (token && (argParser.inQuotes ||
            ! (token.type == 'delimiter' && token.text == ']'))) {
        var err = argParser.handleToken(token);
        if (err) {
            return null;
        }
        token = tokens.next();
    }
    argParser.finalize();

    // At this point the current token is the ']'
    token = tokens.next();
    // Nom all the whitespace until we get a '['
    // BUGFIX: this loop tested tokens.type (always undefined), so the
    // whitespace between "][" was never actually skipped.
    while (token && token.type == 'whitespace') {
        token = tokens.next();
    }
    if (! token || token.type != 'delimiter' || token.text != '[') {
        return null;
    }

    // Just like the case at the top (guard against end-of-stream too)
    token = tokens.next();
    if (token && token.type == 'text' &&
            tokens.peek() &&
            tokens.peek().type == 'delimiter' &&
            tokens.peek().text == ']' &&
            this._isExtension(token.text)) {
        tokens.next();
        return this._callReference(token.text, argParser.args,
                argParser.namedArgs);
    } else {
        return null;
    }
}

/*
 * Parse a sequence of block-level elements (headers, lists, block quotes,
 * paragraphs) from the token stream, appending each to root. Stops when
 * the stream is exhausted.
 */
this.Parser.prototype._parseBlocks = function (root, tokens) {
    while (tokens.peek() != null) {
        var token = tokens.peek();
        if (token.type == "\n") {
            tokens.next();
            continue;
        } else if (token.type == 'header') {
            this.generator.appendChild(root, this._parseHeader(tokens));
        } else if (token.type == 'unordered item') {
            this.generator.appendChild(root, this._parseUnorderedList(tokens));
        } else if (token.type == 'ordered item') {
            this.generator.appendChild(root, this._parseOrderedList(tokens));
        } else if (token.type == 'block quote') {
            this.generator.appendChild(root, this._parseBlockQuote(tokens, 1));
        } else if (Gravy.Parser._inline_types[token.type]) {
            // A run of inline content becomes a paragraph.
            // BUGFIX: `paragraph` was assigned without `var`, leaking an
            // implicit global.
            var paragraph = this.generator.create('paragraph');
            this._parseInline(paragraph, tokens);
            this.generator.appendChild(root, paragraph);
        } else {
            // BUGFIX: an unrecognized token type was never consumed, so
            // the loop spun forever. Discard it and keep going.
            tokens.next();
        }
    }
}

/*
 * Parse a header token plus its inline content and return the header node.
 * Trailing whitespace and '=' decorations at the end of the header line
 * are stripped before the remaining tokens are parsed as inline content.
 */
this.Parser.prototype._parseHeader = function (tokens) {
    var token = tokens.next();
    var header = this.generator.create('header', {
        level: token.level
    });

    // Buffer every inline token up to the end of the line. Because
    // unshift() stores the most recently consumed token at index 0, the
    // buffer itself works as a pushable iterator: next()/pop() replays
    // the tokens in their original order.
    var buffered = [];
    for (token = tokens.next(); token != null; token = tokens.next()) {
        if (token.type == "\n") {
            break;
        }
        if (! Gravy.Parser._inline_types[token.type]) {
            tokens.push(token);
            break;
        }
        buffered.unshift(token);
    }

    // Clean up the end of the header: drop trailing whitespace, then any
    // '=' decorations, then the whitespace that preceded them.
    var stages = ['whitespace', '=', 'whitespace'];
    for (var i = 0; i < stages.length; i++) {
        while (buffered.length > 0 && buffered[0].type == stages[i]) {
            buffered.shift();
        }
    }
    this._parseInline(header, buffered);
    return header;
}

/*
 * Parse one list item: its inline content followed by any nested lists
 * indented more deeply than `indent`.
 */
this.Parser.prototype._parseListItem = function (indent, tokens) {
    var item = this.generator.create('list item');
    for (;;) {
        this._parseInline(item, tokens);
        var upcoming = tokens.peek();
        if (upcoming == null) {
            break;
        }
        if (upcoming.type == 'ordered item' && upcoming.level > indent) {
            this.generator.appendChild(item, this._parseOrderedList(tokens));
        } else if (upcoming.type == 'unordered item' &&
                upcoming.level > indent) {
            this.generator.appendChild(item, this._parseUnorderedList(tokens));
        } else {
            break;
        }
    }
    return item;
}

/*
 * Parse a run of 'unordered item' tokens at (or deeper than) the current
 * indentation level into an unordered list node.
 */
this.Parser.prototype._parseUnorderedList = function (tokens) {
    var list = this.generator.create('unordered list');
    var indent = tokens.peek().level;
    for (var token = tokens.peek();
            token != null && token.type == 'unordered item' &&
                token.level >= indent;
            token = tokens.peek()) {
        tokens.next();
        this.generator.appendChild(list, this._parseListItem(indent, tokens));
    }
    return list;
}

/*
 * Parse a run of 'ordered item' tokens at (or deeper than) the current
 * indentation level into an ordered list node.
 */
this.Parser.prototype._parseOrderedList = function (tokens) {
    var list = this.generator.create('ordered list');
    var indent = tokens.peek().level;
    var token = tokens.peek();
    while (token != null && token.type == 'ordered item' &&
            token.level >= indent) {
        tokens.next();
        this.generator.appendChild(list, this._parseListItem(indent, tokens));
        token = tokens.peek();
    }
    return list;
}

/*
 * Parse consecutive 'block quote' lines into a block quote node. The
 * quoted line bodies are buffered and then re-parsed as block content, so
 * quotes may contain headers, lists, etc.
 *
 * NOTE(review): the `level` parameter is currently unused; kept for
 * interface compatibility (callers pass 1).
 */
this.Parser.prototype._parseBlockQuote = function (tokens, level) {
    var quote = this.generator.create('block quote');
    // BUGFIX: `subtokens` was assigned without `var`, leaking an implicit
    // global. Built with unshift() so it acts as a pushable iterator
    // (next() pops from the end, replaying tokens in original order).
    var subtokens = [];
    while (true) {
        var token = tokens.peek();
        if (token === null || token === undefined ||
                token.type != 'block quote') {
            break;
        } else {
            tokens.next();
        }
        // Nom whitespace
        while (tokens.peek() && tokens.peek().type == 'whitespace') {
            tokens.next();
        }
        // Copy the rest of the line (newline included) into the buffer.
        while (true) {
            token = tokens.next();
            if (token === null || token === undefined) {
                // BUGFIX: don't buffer the end-of-stream marker itself.
                break;
            }
            subtokens.unshift(token);
            if (token.type == '\n') {
                break;
            }
        }
    }
    this._parseBlocks(quote, subtokens);
    return quote;
}

/*
 * First pass over the token stream: pull every 'reference' definition out
 * into this._references (id -> {callName, args, namedArgs}) and return the
 * remaining tokens as a pushable iterator (unshift-built, so next()/pop()
 * replays them in their original order).
 */
this.Parser.prototype._parseReferenceValues = function (tokens) {
    this._references = {};
    var retTokens = [];
    var token;
    while (true) {
        token = tokens.next();
        if (token === null || token === undefined) {
            break;
        } else if (token.type == 'reference') {
            // Stash This
            var ref_token = token;

            // Parse the reference args
            var argParser = new ExtensionArgParser();
            token = tokens.next();
            // BUGFIX: also stop at end-of-stream; a reference on the last
            // line with no trailing newline previously crashed reading
            // token.type of undefined.
            while (token && token.type != '\n') {
                // TODO: Error handling
                argParser.handleToken(token);
                token = tokens.next();
            }
            argParser.finalize();

            this._references[ref_token.id] = {
                'callName': ref_token.callName,
                'args': argParser.args,
                'namedArgs': argParser.namedArgs
            };
        } else {
            retTokens.unshift(token);
        }
    }
    return retTokens;
}

/*
 * Report whether `id` names a known extension: either a reference defined
 * in the document, or the built-in 'TestNull' extension when the parser
 * is in testing mode.
 */
this.Parser.prototype._isExtension = function (id) {
    if (this._references[id]) {
        return true;
    }
    return Boolean(this._testing && id == 'TestNull');
}

/*
 * Resolve an extension call by id. References chain: a reference's own
 * positional args are appended after the caller's, and its named args fill
 * in only the names the caller did not supply. In testing mode the
 * 'TestNull' extension simply records its arguments. Returns undefined
 * for unknown ids.
 *
 * NOTE(review): mutually-recursive reference definitions would recurse
 * forever; input is assumed acyclic — confirm upstream validation.
 */
this.Parser.prototype._callReference = function (id, args, namedArgs) {
    if (this._references[id]) {
        var ref = this._references[id];
        args = args.concat(ref.args);
        for (var arg in ref.namedArgs) {
            // BUGFIX: `! arg in namedArgs` parsed as `(!arg) in namedArgs`
            // (i.e. a lookup of the key "false"), so the reference's named
            // defaults were effectively never merged correctly.
            if (! (arg in namedArgs)) {
                namedArgs[arg] = ref.namedArgs[arg];
            }
        }
        return this._callReference(ref.callName, args, namedArgs);
    } else if (this._testing && id == 'TestNull') {
        return this.generator.create('extension call', {
            'args': args,
            'named args': namedArgs
        });
    }
}

}; // End namespace

