package org.hawk.gwt.ppc.parser.impl;

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.hawk.gwt.ppc.parser.Descriptor;
import org.hawk.gwt.ppc.parser.JavaKeywords;
import org.hawk.gwt.ppc.parser.PreprocessorParseException;
import org.hawk.gwt.ppc.parser.Token;
import org.hawk.gwt.ppc.utils.PreprocessorException;

/**
 * Common methods for descriptor parsing.
 * 
 * @author alex.bereznevatiy@gmail.com
 *
 */
abstract class DescriptorParserUtils implements DescriptorParser {

	/**
	 * All Java modifier keywords accepted by {@link #readModifiers(List, TokenSource)}.
	 * Unmodifiable; built once in the static initializer below.
	 */
	static final Set<String> MODIFIERS;

	static {
		Set<String> set = new HashSet<String>();

		set.add(JavaKeywords.PUBLIC);
		set.add(JavaKeywords.PROTECTED);
		set.add(JavaKeywords.PRIVATE);
		set.add(JavaKeywords.ABSTRACT);
		set.add(JavaKeywords.FINAL);
		set.add(JavaKeywords.VOLATILE);
		set.add(JavaKeywords.STRICTFP);
		set.add(JavaKeywords.SYNCHRONIZED);
		set.add(JavaKeywords.STATIC);
		set.add(JavaKeywords.TRANSIENT);
		set.add(JavaKeywords.NATIVE);

		MODIFIERS = Collections.unmodifiableSet(set);
	}

	// NOTE(review): a private constructor on an abstract class makes it impossible
	// to subclass (the implicit super() call is inaccessible), so the
	// "implements DescriptorParser" clause can never be satisfied by an instance.
	// The class is effectively a static utility holder — confirm whether the
	// interface clause is intentional before relying on it.
	private DescriptorParserUtils() {}

	/**
	 * The same as {@link TokenBasedJavaParser#expect(TokenSource, DescriptorType...)},
	 * but additionally stores the parsed descriptor in {@code resultList} when that
	 * list is not null.
	 * @param parser parser to delegate to
	 * @param tokenizer token source to read from
	 * @param resultList optional accumulator for parsed descriptors; may be null
	 * @param expectedTypes descriptor types accepted at this position
	 * @return the next descriptor, or null
	 * @throws PreprocessorException in case of troubles or unexpected descriptor
	 */
	static Descriptor expect(TokenBasedJavaParser parser, TokenSource tokenizer,
			List<Descriptor> resultList, DescriptorType...expectedTypes) throws PreprocessorException {
		Descriptor result = parser.expect(tokenizer, expectedTypes);
		if (resultList != null && result != null) {
			resultList.add(result);
		}

		return result;
	}

	/**
	 * Advances the tokenizer to the next token if one exists, leaving the
	 * tokenizer in the correct state (EOF-marked when exhausted).
	 * TODO: perhaps, move this method to tokenizer?
	 * @param tokenizer - tokenizer to advance
	 * @param allowEOF - when <code>true</code>, end of input marks the tokenizer
	 *        as EOF; when <code>false</code>, end of input is an error.
	 * @throws PreprocessorException if input is exhausted and {@code allowEOF} is false
	 */
	static void lookupNext(TokenSource tokenizer, boolean allowEOF) throws PreprocessorException {
		if (!tokenizer.hasNext()) {
			if (!allowEOF) {
				throw new PreprocessorParseException("Unexpected end of file", tokenizer.current());
			}
			tokenizer.eof();
		} else {
			tokenizer.next();
		}
	}

	/**
	 * Advances the tokenizer to the next non-comment token if one exists,
	 * leaving the tokenizer in the correct state (EOF-marked when exhausted).
	 * Implemented iteratively so an arbitrarily long run of comment tokens
	 * cannot overflow the call stack.
	 * @param tokenizer - tokenizer to advance
	 * @param allowEOF - when <code>true</code>, end of input marks the tokenizer
	 *        as EOF; when <code>false</code>, end of input is an error.
	 * @throws PreprocessorException if input is exhausted and {@code allowEOF} is false
	 */
	static void lookupNextNonComment(TokenSource tokenizer, boolean allowEOF) throws PreprocessorException {
		while (true) {
			if (!tokenizer.hasNext()) {
				if (!allowEOF) {
					throw new PreprocessorParseException("Unexpected end of file", tokenizer.current());
				}
				tokenizer.eof();
				return;
			}
			if (!tokenizer.next().isComment()) {
				return;
			}
		}
	}

	/**
	 * Reads all consecutive modifier keywords from the tokenizer into
	 * {@code modifiersList}, stopping at the first non-modifier token.
	 * The tokenizer is left positioned on that non-modifier token.
	 * @param modifiersList destination for the modifiers, in source order
	 * @param tokenizer token source positioned on a potential modifier
	 * @throws PreprocessorException if input ends while a modifier is expected to be followed
	 */
	static void readModifiers(List<String> modifiersList, TokenSource tokenizer) throws PreprocessorException {
		String modifier;
		while (MODIFIERS.contains(modifier = tokenizer.current().toString())) {
			modifiersList.add(modifier);
			lookupNext(tokenizer, false);
		}
	}

	/**
	 * Reads a (possibly qualified) type name with optional type parameters and,
	 * when {@code allowArray} is true, trailing array brackets from the tokenizer.
	 * Stores the result in the first parameter.
	 * Tokenizer should point to the first token in the name; on a varargs
	 * parameter ("..."), reading stops at the second dot without appending it.
	 * @param result destination buffer for the type name text
	 * @param tokenizer token source positioned on the first name token
	 * @param allowArray whether trailing {@code []} pairs are consumed
	 * @throws PreprocessorException on an illegal type name or unexpected end of input
	 */
	static void readTypeName(StringBuilder result, TokenSource tokenizer, boolean allowArray) throws PreprocessorException {
		tokenizer.current().writeTo(result);
		lookupNext(tokenizer, true);
		if (tokenizer.isEof()) {
			return;
		}
		while (tokenizer.current().equalsTo('.')) {
			Token tok = tokenizer.current();
			lookupNext(tokenizer, false);
			if (tokenizer.current().equalsTo('.')) {
				return;//var args parameter
			}
			tok.writeTo(result);
			if (!tokenizer.current().isWord()) {
				throw new PreprocessorParseException("Illegal type name", tokenizer.current());
			}
			tokenizer.current().writeTo(result);
			lookupNext(tokenizer, true);
			if (tokenizer.isEof()) {
				return;
			}
		}
		readTypeParameters(result, tokenizer);
		if (!allowArray) {
			return;
		}
		while (!tokenizer.isEof() && tokenizer.current().equalsTo('[')) {
			result.append(tokenizer.current());
			lookupNextNonComment(tokenizer, false);
			if (!tokenizer.current().equalsTo(']')) {
				throw new PreprocessorParseException(tokenizer.current());
			}
			result.append(tokenizer.current());
			lookupNextNonComment(tokenizer, true);
		}
	}

	/**
	 * Reads type parameters from the tokenizer if any, tracking nesting depth so
	 * that {@code >}, {@code >>} and {@code >>>} tokens close one, two or three
	 * levels respectively. Stores the result in the first parameter; the keywords
	 * {@code extends}/{@code super} and the {@code &} bound separator are padded
	 * with spaces for readability.
	 * If the tokenizer is not positioned on {@code '<'} - do nothing.
	 * @param result destination buffer for the type-parameter text
	 * @param tokenizer token source, possibly positioned on {@code '<'}
	 * @throws PreprocessorException on unbalanced angle brackets or unexpected end of input
	 */
	static void readTypeParameters(StringBuilder result, TokenSource tokenizer) throws PreprocessorException {
		if (!tokenizer.current().equalsTo('<')) {
			return;
		}
		tokenizer.current().writeTo(result);
		int deep = 1;

		while (deep > 0) {
			lookupNext(tokenizer, false);
			Token tok = tokenizer.current();
			if (tok.equalsTo('&') || tok.equalsTo(JavaKeywords.EXTENDS) ||
					tok.equalsTo(JavaKeywords.SUPER)) {
				result.append(' ');
				tok.writeTo(result);
				result.append(' ');
			} else {
				tok.writeTo(result);
			}
			if (tok.equalsTo('<')) {
				deep++;
			} else if (tok.equalsTo('>')) {
				deep--;
			} else if (tok.equalsTo(">>")) {
				deep -= 2;
			} else if (tok.equalsTo(">>>")) {
				deep -= 3;
			}
		}

		// A multi-close token (">>" / ">>>") may have closed more levels than
		// were open; that is a malformed parameter list.
		if (deep < 0) {
			throw new PreprocessorParseException("Illegal token", tokenizer.current());
		}

		lookupNext(tokenizer, true);
	}
}
