// Copyright 2014 The Go Circuit Project
// Use of this source code is governed by the license for
// The Go Circuit Project, found in the LICENSE file.
//
// Authors:
//   2014 Chris Monson <chris@gocircuit.org>

/* Shell-style argument parsing.

This is not exactly like any of the shells in common use (e.g., Bash). The
basic need is to allow for quoting to make it easy to specify spaces in
atomic string units. Thus, with the Split function, you can easily parse things like

  echo "Hello, World!"

into ["echo", "Hello, World!"].

If you were to just split on spaces, you would instead get ["echo", `"Hello,`, `World!"`],
which is obviously less useful.

In the example above, the quotes surrounding Hello, World! are parsed out and
assumed to be delimiters, not part of the actual argument string.

This package also provides Quote, a way of quoting strings so that they will be
properly parsed by Split.

DivideArgTypes is a helper function for detecting when environment variables
end and arguments begin. Since the purpose is to allow specification of
process parameters on the command line via space-separated entries, this
allows you to specify environment variables the way Bash allows it, by
preceding the command with variable settings:

  MYVAR=hello echo $MYVAR

Split produces ["MYVAR=hello", "echo", "$MYVAR"] from the above, and DivideArgTypes
divides that into two slices: ["MYVAR=hello"] and ["echo", "$MYVAR"].

Special characters can be specified using \n, \t, \r, \0, and \\. You can also
literalize any space character by preceding it with a backslash (e.g., `\ ` or
"\\n" or even "\\r\n").
*/
package shargs

import (
	"fmt"
	"regexp"
	"strings"
	"unicode/utf8"
)

var (
	// variablePattern matches a leading environment-variable assignment
	// prefix of the form NAME=, where NAME is [a-zA-Z_][a-zA-Z0-9_]*.
	variablePattern = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*=`)
)

// DivideArgTypes divides pieces into a leading run of environment-variable
// assignments ("NAME=value" entries) and the remaining arguments, mirroring
// how Bash treats "VAR=x cmd arg". Even though it is not an OS requirement,
// we assume that variable names are parsed by exactly this regular
// expression:
//		[a-zA-Z_][a-zA-Z0-9_]*
func DivideArgTypes(pieces []string) (vars, args []string) {
	// The first entry that does not look like an assignment starts the args.
	split := len(pieces)
	for idx, piece := range pieces {
		if !variablePattern.MatchString(piece) {
			split = idx
			break
		}
	}
	return pieces[:split], pieces[split:]
}

// Quote escapes input so that Split parses it back as exactly one argument.
// Backslashes, quotes, and the control characters \r, \n, \t, and NUL are
// backslash-escaped, and the result is wrapped in double quotes when it
// contains a space or is empty.
//
// Quoting the empty string is required for round-tripping: an unquoted empty
// token would simply disappear when passed through Split, whereas `""` is
// parsed back as a single empty argument.
func Quote(input string) string {
	r := strings.NewReplacer(
		`\`, `\\`,
		`"`, `\"`,
		`'`, `\'`,
		"\r", `\r`,
		"\n", `\n`,
		"\t", `\t`,
		"\x00", `\0`,
	)
	// Only a literal space (or emptiness) forces quoting: the replacer above
	// has already converted every other whitespace character to an escape.
	if input == "" || strings.ContainsAny(input, ` `) {
		return `"` + r.Replace(input) + `"`
	}
	return r.Replace(input)
}

// Split splits a string into a slice of whitespace-delimited strings,
// allowing for quoted units and handling quote escaping. Quotes are stripped
// from quoted units, adjacent unquoted/quoted pieces fuse into a single
// argument (the lexer emits them with no intervening space item), and escape
// sequences such as \n, \t, \r, \0, and \\ are decoded. Malformed input
// (e.g. an unterminated quoted string) yields a non-nil error.
func Split(input string) ([]string, error) {
	lexer := lex(input)
	var allstrs []string
	var curpieces []string
	// TODO: are there other control characters that we want to allow people to
	// type out this way? Unicode? Supporting things like that will potentially
	// require major lexer changes.
	//
	// NOTE: strings.Replacer compares old strings in argument order, so the
	// longer "\\\r\n" pattern must stay listed before "\\\r".
	r := strings.NewReplacer(
		"\\0", "\x00", // null characters are sometimes really useful
		"\\ ", " ",
		"\\\t", "\t", // literal tab preceded by backslash
		"\\\r\n", "\r\n", // crlf pair
		"\\\r", "\r", // literal return preceded by backslash
		"\\\n", "\n", // literal newline preceded by backslash
		"\\n", "\n", // the string `\n`
		"\\r", "\r", // the string `\r`
		"\\t", "\t", // the string `\t`
		"\\'", "'",
		"\\\"", "\"",
		"\\\\", "\\",
	)
	// Ensure that we consume all lexed output even on an early error return,
	// or the lexer goroutine would block forever on its unbuffered channel.
	defer func() {
		for range lexer.items {
		}
	}()
	for item := range lexer.items {
		switch item.typ {
		case itemSpace, itemEOF:
			// Whitespace (or EOF) closes the current argument. An empty
			// curpieces means consecutive separators: emit nothing. A quoted
			// empty string still counts as one piece and is preserved.
			if len(curpieces) > 0 {
				allstrs = append(allstrs, strings.Join(curpieces, ""))
			}
			curpieces = nil
		case itemBareText, itemSqText, itemDqText:
			curpieces = append(curpieces, r.Replace(item.val))
		case itemError:
			return nil, fmt.Errorf("Arg parsing error: %s", item.val)
		default:
			return nil, fmt.Errorf("Unknown item type for item: %#v", item)
		}
	}
	return allstrs, nil
}

// item is a single token produced by the lexer: its type, the byte offset
// in the input where it starts, and its raw text.
type item struct {
	typ itemType
	pos int
	val string
}

// String renders the item for debugging: "EOF" at end of input, the raw
// message for errors, and the quoted token text otherwise.
func (i item) String() string {
	switch i.typ {
	case itemEOF:
		return "EOF"
	case itemError:
		return i.val
	default:
		return fmt.Sprintf("%q", i.val)
	}
}

// itemType identifies the kind of token emitted by the lexer.
type itemType int

const (
	itemError    itemType = iota // lexing failure; val holds the message
	itemBareText                 // unquoted text
	itemDqText                   // contents of a double-quoted unit
	itemSqText                   // contents of a single-quoted unit
	itemSpace                    // run of whitespace
	itemEOF                      // end of input
)

// eof is the sentinel rune returned by the lexer at end of input.
const eof = -1

// stateFn is one state of the lexer's state machine: it scans some input,
// emits zero or more items, and returns the next state (nil terminates).
type stateFn func(*lexer) stateFn

// lexer holds the scanning state for a single input string. Items are
// delivered on the items channel by a goroutine running the state machine
// (see run); the window [start, pos) is the text pending emission.
type lexer struct {
	input string    // complete input
	state stateFn   // next state
	pos   int       // current input position
	start int       // start of this sequence
	width int       // width of last rune read
	items chan item // output channel
}

// next consumes and returns the next rune of the input, or eof when the
// input is exhausted. The consumed width is recorded so backup can undo
// exactly one call.
func (l *lexer) next() rune {
	if l.pos >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = w
	l.pos += w
	return r
}

// peek reports the next rune without consuming it.
func (l *lexer) peek() rune {
	ch := l.next()
	l.backup()
	return ch
}

// ignore discards the pending input window without emitting it, e.g. to
// skip over a quote delimiter.
func (l *lexer) ignore() {
	l.start = l.pos
}

// backup steps back over the last rune read. It is only valid once per call
// to next, since only a single rune width is recorded.
func (l *lexer) backup() {
	l.pos -= l.width
}

// emit sends the pending input window [start, pos) as an item of type t and
// marks it consumed.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
}

// errorf emits an itemError carrying the formatted message and returns nil
// so the state machine terminates.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
	return nil
}

// lex starts lexing input in a new goroutine and returns the lexer. Tokens
// arrive on the (unbuffered) items channel, which is closed when lexing is
// done — callers must drain it or the goroutine leaks.
func lex(input string) *lexer {
	l := &lexer{
		input: input,
		items: make(chan item),
	}
	go l.run()
	return l
}

// run drives the state machine from lexBareText until a state returns nil,
// then closes the items channel to signal completion to the consumer.
func (l *lexer) run() {
	for l.state = lexBareText; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}

// lexBareText scans unquoted text until reaching whitespace or an unescaped
// quote, emitting any accumulated text as itemBareText before handing
// control to the state that consumes the delimiter. A backslash escapes the
// following character (so an escaped quote or space does not end the token),
// and a backslash before a CRLF pair escapes both characters.
//
// The delimiter dispatch is a switch rather than the previous per-call
// map[rune]stateFn: the key set is a small constant, and rebuilding the map
// on every state entry allocated needlessly.
func lexBareText(l *lexer) stateFn {
	for {
		switch ch := l.next(); ch {
		case eof:
			if l.start < l.pos {
				l.emit(itemBareText)
			}
			l.emit(itemEOF)
			return nil
		case '\'', '"', ' ', '\r', '\n', '\t':
			// An unescaped delimiter ends the current bare token.
			l.backup()
			if l.start < l.pos {
				l.emit(itemBareText)
			}
			switch ch {
			case '\'':
				return lexSqText
			case '"':
				return lexDqText
			}
			return lexSpace
		case '\\':
			// Consume the escaped character so it cannot act as a delimiter.
			// A backslash before \r\n escapes the whole CRLF pair. The escape
			// sequence itself stays in the text for Split to decode.
			if l.next() == '\r' && l.peek() == '\n' {
				l.next()
			}
		}
	}
}

// lexSpace slurps up a run of whitespace and emits it as a single itemSpace.
// The first character is known to be whitespace. At end of input, peek
// returns eof, which matches no whitespace test, so the loop terminates.
func lexSpace(l *lexer) stateFn {
	for {
		ch := l.peek()
		if ch != ' ' && ch != '\t' && ch != '\r' && ch != '\n' {
			break
		}
		l.next()
	}
	if l.start < l.pos {
		l.emit(itemSpace)
	}
	return lexBareText
}

// lexSqText lexes a single-quoted unit: all text up to the next non-escaped
// single quote, emitted as itemSqText with the quotes stripped. The current
// character is already known to be the opening single quote.
func lexSqText(l *lexer) stateFn {
	return lexhelperQuoted(l, '\'', itemSqText)
}

// lexDqText lexes a double-quoted unit: all text up to the next non-escaped
// double quote, emitted as itemDqText with the quotes stripped. The current
// character is already known to be the opening double quote.
func lexDqText(l *lexer) stateFn {
	return lexhelperQuoted(l, '"', itemDqText)
}

// lexhelperQuoted scans a string delimited by quoteChar and emits its
// contents — without the surrounding quotes — as emitType. The next rune
// must be the opening quote. A backslash escapes the following character,
// so an escaped quote does not terminate the unit; escape sequences are
// left in the emitted text for Split to decode. An itemError is emitted if
// the opening quote is missing or the string is unterminated.
func lexhelperQuoted(l *lexer, quoteChar rune, emitType itemType) stateFn {
	if ch := l.next(); ch != quoteChar {
		return l.errorf("Expected start of %q string, got %q", quoteChar, ch)
	}
	// Skip the leading quote
	l.ignore()
	var ch rune
	for ch = l.next(); ch != eof && ch != quoteChar; ch = l.next() {
		if ch == '\\' {
			// Skip the escaped rune so an embedded quote cannot end the unit.
			ch = l.next()
		}
	}
	// Reaching eof before the closing quote means the string never ended.
	if ch != quoteChar {
		return l.errorf("unterminated %c%c-quoted string: %q", quoteChar, quoteChar, l.input[l.start:l.pos])
	}
	// Exclude the terminating quote from the emitted text.
	l.backup()
	l.emit(emitType)
	// Skip past the closing quote.
	l.next()
	l.ignore()
	return lexBareText
}
