/**
 * Copyright 2012 University of Massachusetts Amherst
 * 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *   http://www.apache.org/licenses/LICENSE-2.0
 *   
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
package com.googlecode.clearnlp.tokenization;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.googlecode.clearnlp.morphology.MPLib;
import com.googlecode.clearnlp.util.Span;
import com.googlecode.clearnlp.util.UTArray;
import com.googlecode.clearnlp.util.pair.IntIntPair;

/**
 * @since 1.1.0
 * @author Jinho D. Choi ({@code jdchoi77@gmail.com})
 */
public class EnglishTokenizer extends AbstractTokenizer
{
	private static final Logger LOG = LoggerFactory.getLogger(EnglishTokenizer.class);
	
	// Names of the dictionary files expected inside the model zip archive (see initDictionaries).
	protected final String F_DIR			= "tokenize/";
	protected final String F_EMOTICONS		= F_DIR+"emoticons.txt";
	protected final String F_ABBREVIATIONS	= F_DIR+"abbreviations.txt";
	protected final String F_HYPHENS		= F_DIR+"hyphens.txt";
	protected final String F_COMPOUNDS		= F_DIR+"compounds.txt";
	protected final String F_UNITS			= F_DIR+"units.txt";
	protected final String F_MICROSOFT		= F_DIR+"microsoft.txt";	// NOTE(review): declared but never loaded in this class
	
	// Placeholder strings temporarily substituted into token text to protect
	// characters from splitting; recovered later via the P_RECOVER_* patterns.
	protected final String S_DELIM			= " ";
	protected final String S_PROTECTED		= "PR0T_";
	protected final String S_D0D			= "_DPPD_";	// digit-punct-digit mask, completed with an A_D0D index + "_"
	protected final String S_HYPHEN			= "_HYYN_";
	protected final String S_AMPERSAND		= "_APSD_";
	protected final String S_APOSTROPHY		= "_AOOR_";
	protected final int    N_PROTECTED		= S_PROTECTED.length();
	
	protected final Pattern P_DELIM			= Pattern.compile(S_DELIM);
	protected final Pattern P_HYPHEN		= Pattern.compile("-");
	protected final Pattern P_ABBREVIATION	= Pattern.compile("^(\\p{Alpha}\\.)+\\p{Alpha}?$");
	// Punctuation marks maskable between digits; the array index doubles as the placeholder id (see initMapsD0D).
	protected final String[] A_D0D = {".",",",":","-","/","'"};
	
	// Retokenizers applied by getTokenList, in pipeline order (built in initReplacers / initDictionariesUnits).
	protected Retokenizer   R_URL;
	protected Retokenizer   R_ABBREVIATION;
	protected Retokenizer   R_PERIOD_LIKE;
	protected Retokenizer   R_MARKER;
	protected Retokenizer   R_APOSTROPHY;
	protected Retokenizer   R_USDOLLAR;
	protected Retokenizer   R_AMPERSAND;
	protected Retokenizer   R_WAW;
	protected Retokenizer   R_PUNCTUATION_PRE;
	protected Retokenizer   R_PUNCTUATION_POST;
	protected Retokenizer[] R_D0D;
	protected Retokenizer[] R_UNIT;
	
	// Dictionary-backed state loaded from the zip archive (see initDictionaries) and
	// the compiled patterns that undo the placeholder substitutions above.
	protected Set<String>					T_EMOTICONS;
	protected Set<String>					T_ABBREVIATIONS;
	protected Pattern						P_HYPHEN_LIST;
	protected ObjectIntOpenHashMap<String>	M_D0D;
	protected ObjectIntOpenHashMap<String>	M_COMPOUNDS;
	protected List<IntIntPair[]>			L_COMPOUNDS;
	protected Pattern[]						P_RECOVER_D0D;
	protected Pattern						P_RECOVER_DOT;	// NOTE(review): never assigned or read in this class
	protected Pattern						P_RECOVER_HYPHEN;
	protected Pattern						P_RECOVER_APOSTROPHY;
	protected Pattern						P_RECOVER_AMPERSAND;

	/**
	 * Creates an English tokenizer, loading its dictionaries from the given zip stream.
	 * @param zin archive containing the resource files under {@code tokenize/}; it is
	 *            closed once the dictionaries have been read.
	 */
	public EnglishTokenizer(ZipInputStream zin)
	{
		initReplacers();
		initMapsD0D();
		initPatterns();
		
		try
		{
			initDictionaries(zin);
		}
		catch (Exception e)
		{
			// Log through SLF4J instead of printStackTrace(); tokenization will likely
			// fail later (null dictionaries), so at least record the root cause here.
			LOG.error("Failed to initialize tokenizer dictionaries", e);
		}
	}
	
	/**
	 * Tokenizes the input string by running the full pipeline: whitespace splitting,
	 * protection of unsplittable tokens (emoticons, abbreviations, filenames),
	 * pattern-based retokenization, and finally recovery of masked characters.
	 * @param str the raw input text.
	 * @return the resulting tokens with character spans.
	 */
	public List<FlaggedToken> getTokenList(String str)
	{
		List<FlaggedToken> lTokens = tokenizeWhiteSpaces(str);

		// Protect/split in a fixed order; later passes rely on earlier masks being in place.
		protectEmoticons(lTokens);
		lTokens = tokenizePatterns(lTokens, R_URL);
		lTokens = tokenizePatterns(lTokens, R_ABBREVIATION);
		lTokens = tokenizePatterns(lTokens, R_PERIOD_LIKE);
		lTokens = tokenizePatterns(lTokens, R_MARKER);
		lTokens = tokenizePatterns(lTokens, R_USDOLLAR);
		for (Retokenizer r : R_D0D) replaceProtects(lTokens, r);	// mask digit-punct-digit contexts
		replaceHyphens(lTokens);
		lTokens = tokenizePatterns(lTokens, R_PUNCTUATION_PRE);
		protectAbbreviations(lTokens);
		protectFilenames(lTokens);
		
		lTokens = tokenizeCompounds(lTokens);
		lTokens = tokenizePatterns(lTokens, R_APOSTROPHY);
		replaceProtects(lTokens, R_AMPERSAND);
		replaceProtects(lTokens, R_WAW);
		for (Retokenizer r : R_UNIT) lTokens = tokenizePatterns(lTokens, r);
		lTokens = tokenizePatterns(lTokens, R_PUNCTUATION_POST);
		
		// Restore the masked characters (digit contexts, hyphens, apostrophes, ampersands).
		int i, size = P_RECOVER_D0D.length;
		for (i=0; i<size; i++)	recoverPatterns(lTokens, P_RECOVER_D0D[i], A_D0D[i]);
		recoverPatterns(lTokens, P_RECOVER_HYPHEN, "-");
		recoverPatterns(lTokens, P_RECOVER_APOSTROPHY, "'");
		recoverPatterns(lTokens, P_RECOVER_AMPERSAND, "&");
		
		for (FlaggedToken fTok : lTokens) 
		{
			// NOTE(review): (1000, 1001) appears to be a sentinel for "no span computed";
			// confirm downstream consumers expect this value.
			if (!fTok.tok.getSpan().hasValues())
				fTok.tok.setSpan(new Span(1000, 1001));
			LOG.trace("Token '{}' has span {}", fTok.tok, fTok.tok.getSpan());
		}
		return lTokens;
	}
	
	/** Called by {@link EnglishTokenizer#EnglishTokenizer(ZipInputStream)}. */
	private void initReplacers()
	{
		// Retokenizers that protect whole matches (URLs, "...", "!!", "--", etc.)
		// or split clitics / currency markers off the surrounding token.
		R_URL          = new RetokenizerOne(MPLib.URL_SPAN);
		R_ABBREVIATION = new RetokenizerOnePlus("(^(\\p{Alpha}\\.)+)(\\p{Punct}*$)");
		R_PERIOD_LIKE  = new RetokenizerOne("(\\.|\\?|\\!){2,}");
		R_MARKER       = new RetokenizerOne("\\-{2,}|\\*{2,}|\\={2,}|\\~{2,}|\\,{2,}|\\`{2,}|\\'{2,}");
		R_APOSTROPHY   = new RetokenizerOne("(?i)((\\')(s|d|m|ll|re|ve|nt)|n(\\')t)$");	// clitics: 's, 'd, n't, 'll, ...
		R_USDOLLAR     = new RetokenizerOne("^US\\$");
		R_AMPERSAND    = getReplacerAmpersand();
		R_WAW          = getReplacerWAWs();
		
		// Brackets/quotes are split before abbreviation protection; sentence-final
		// punctuation is split in a later pass (see getTokenList for the ordering).
		R_PUNCTUATION_PRE  = new RetokenizerOne("\\(|\\)|\\[|\\]|\\{|\\}|<|>|\\,|\\:|\\;|\\\"");
		R_PUNCTUATION_POST = new RetokenizerOne("\\.|\\?|\\!|\\`|\\'|\\-|\\/|\\@|\\#|\\$|\\%|\\&|\\|");
		
		initReplacersD0Ds();
	}
	
	/**
	 * @return a retokenizer that masks an ampersand between two uppercase letters
	 *         (e.g. "AT&amp;T") with {@link #S_AMPERSAND}, keeping the token whole.
	 */
	private Retokenizer getReplacerAmpersand()
	{
		final String regex = "(\\p{Upper})(\\&)(\\p{Upper})";
		return new RetokenizerInternalPunct(regex, S_AMPERSAND);
	}
	
	/**
	 * Retokenizer that keeps a three-group match together as one protected token,
	 * substituting the middle (punctuation) group with a placeholder string which is
	 * recovered later (see {@code recoverPatterns}).
	 */
	private class RetokenizerInternalPunct extends Retokenizer {
		private final String replacement;

		protected RetokenizerInternalPunct(String regex, String punctReplacement) {
			super(regex);
			this.replacement = punctReplacement;
		}

		@Override
		protected Collection<FlaggedToken> flaggedTokensFromMatcher(
				Matcher matcher) {
			// Rebuild the match with the inner punctuation masked; flag it protected.
			String masked = matcher.group(1) + replacement + matcher.group(3);
			Token merged = new Token(masked, matcher.start(), matcher.end());
			
			List<FlaggedToken> result = new ArrayList<FlaggedToken>();
			result.add(new FlaggedToken(merged, true));
			return result;
		}
	}
	
	/** Called by {@link EnglishTokenizer#initReplacers()}. */
	private Retokenizer getReplacerWAWs()
	{
		// Masks an apostrophe flanked by word characters (e.g. "o'clock") with S_APOSTROPHY.
		final String regex = "(\\w)(\\')(\\w)";
		return new RetokenizerInternalPunct(regex, S_APOSTROPHY);
	}
	
	/** Called by {@link EnglishTokenizer#initReplacers()}. */
	private void initReplacersD0Ds()
	{
		// Digit-punctuation-digit contexts (e.g. "1.2", "3,000", "'90", "90's") whose
		// inner punctuation must be masked before the generic punctuation splits run.
		final String[] regexes = {
				"(^|\\p{Alnum})(\\.)(\\d)",
				"(\\d)(,|:|-|\\/)(\\d)",
				"(^)(\\')(\\d)",
				"(\\d)(\\')(s)"};
		
		R_D0D = new Retokenizer[regexes.length];
		
		for (int i=0; i<regexes.length; i++)
			R_D0D[i] = new RetokenizerD0D(regexes[i]);
	}
	
	/** Called by {@link EnglishTokenizer#EnglishTokenizer(ZipInputStream)}. */
	private void initMapsD0D()
	{
		// Maps each maskable punctuation mark in A_D0D to its index, which is
		// embedded in the digit-context placeholder (see RetokenizerD0D).
		M_D0D = new ObjectIntOpenHashMap<String>();
		
		for (int i=0; i<A_D0D.length; i++)
			M_D0D.put(A_D0D[i], i);
	}
	
	/** Compiles the patterns that recover placeholder strings to their original characters. */
	private void initPatterns()
	{
		P_RECOVER_D0D = new Pattern[A_D0D.length];
		
		// One recovery pattern per indexed digit-context placeholder: "_DPPD_<i>_".
		for (int i=0; i<P_RECOVER_D0D.length; i++)
			P_RECOVER_D0D[i] = Pattern.compile(S_D0D+i+"_");
		
		P_RECOVER_HYPHEN     = Pattern.compile(S_HYPHEN);
		P_RECOVER_APOSTROPHY = Pattern.compile(S_APOSTROPHY);
		P_RECOVER_AMPERSAND  = Pattern.compile(S_AMPERSAND);
	}
	
	/** Called by {@link EnglishTokenizer#EnglishTokenizer(ZipInputStream)}. */
	private void initDictionaries(ZipInputStream zin) throws Exception
	{
		ZipEntry zEntry;
		
		// Dispatch each archive entry to the loader for the matching resource file;
		// unrecognized entries are skipped.
		while ((zEntry = zin.getNextEntry()) != null)
		{
			String filename = zEntry.getName();
			
			if (F_EMOTICONS.equals(filename))
				T_EMOTICONS = getSet(zin);
			else if (F_ABBREVIATIONS.equals(filename))
				T_ABBREVIATIONS = getSet(zin);
			else if (F_HYPHENS.equals(filename))
				P_HYPHEN_LIST = getHyphenPatterns(zin);
			else if (F_COMPOUNDS.equals(filename))
				initDictionariesComounds(zin);
			else if (F_UNITS.equals(filename))
				initDictionariesUnits(zin);
		}
		
		zin.close();
	}
	
	/**
	 * Reads the current zip entry line by line, returning the trimmed lines as a set.
	 * Called by {@link EnglishTokenizer#initDictionaries(ZipInputStream)}.
	 */
	private Set<String> getSet(ZipInputStream zin) throws Exception
	{
		Set<String> entries = new HashSet<String>();
		// Do not close the reader: that would close the shared zip stream.
		BufferedReader reader = new BufferedReader(new InputStreamReader(zin));
		
		for (String line = reader.readLine(); line != null; line = reader.readLine())
			entries.add(line.trim());
		
		return entries;
	}
	
	/**
	 * Reads one regular expression per line and compiles them into a single alternation.
	 * Called by {@link EnglishTokenizer#initDictionaries(ZipInputStream)}.
	 * @return a pattern matching any expression in the file; a never-matching pattern
	 *         if the file is empty.
	 */
	private Pattern getHyphenPatterns(ZipInputStream zin) throws Exception
	{
		BufferedReader fin = new BufferedReader(new InputStreamReader(zin));
		StringBuilder build = new StringBuilder();
		String line;
		
		while ((line = fin.readLine()) != null)
		{
			build.append("|");
			build.append(line.trim());
		}
		
		// Robustness: the original called build.substring(1) unconditionally, which
		// throws StringIndexOutOfBoundsException on an empty file. "(?!)" is a
		// standard never-matching pattern.
		if (build.length() == 0)
			return Pattern.compile("(?!)");
		
		return Pattern.compile(build.substring(1));	// drop the leading "|"
	}
	
	/**
	 * Loads the compounds file: each line holds the whitespace-separated parts of one
	 * compound. The joined form is mapped to a 1-based id in {@link #M_COMPOUNDS}, and
	 * the parts' character offsets are stored in {@link #L_COMPOUNDS} at id-1.
	 * Called by {@link EnglishTokenizer#initDictionaries(ZipInputStream)}.
	 */
	private void initDictionariesComounds(ZipInputStream zin) throws Exception
	{
		BufferedReader fin = new BufferedReader(new InputStreamReader(zin));
		M_COMPOUNDS = new ObjectIntOpenHashMap<String>();
		L_COMPOUNDS = new ArrayList<IntIntPair[]>();
		
		int i, j, len, bIdx, eIdx;
		IntIntPair[] p;
		String[] tmp;
		String line;
		
		// Ids start at 1 so a map miss (which yields 0) is distinguishable from the
		// first entry; tokenizeCompounds subtracts 1 before indexing L_COMPOUNDS.
		for (i=1; (line = fin.readLine()) != null; i++)
		{
			tmp = P_DELIM.split(line.trim());
			len = tmp.length;
			p   = new IntIntPair[len];
			
			M_COMPOUNDS.put(UTArray.join(tmp, ""), i);
			L_COMPOUNDS.add(p);
			
			// Record the [begin, end) offsets of each part within the joined string.
			for (j=0,bIdx=0; j<len; j++)
			{
				eIdx = bIdx + tmp[j].length();
				p[j] = new IntIntPair(bIdx, eIdx);
				bIdx = eIdx;
			}
		}
	}
	
	/**
	 * Reads the three lines of the units file (sign, currency, and unit alternations)
	 * and builds the unit-splitting retokenizers.
	 * Called by {@link EnglishTokenizer#initDictionaries(ZipInputStream)}.
	 * @throws Exception if the file contains fewer than three lines.
	 */
	private void initDictionariesUnits(ZipInputStream zin) throws Exception
	{
		BufferedReader fin = new BufferedReader(new InputStreamReader(zin));
		String signs       = fin.readLine();
		String currencies  = fin.readLine();
		String units       = fin.readLine();
		
		// Robustness: the original dereferenced the lines unchecked, raising a bare
		// NullPointerException on a truncated file; fail with an explicit message.
		if (signs == null || currencies == null || units == null)
			throw new Exception("Malformed units file: expected 3 lines (signs, currencies, units)");
		
		signs      = signs.trim();
		currencies = currencies.trim();
		units      = units.trim();
		
		R_UNIT = new Retokenizer[4];
		
		// Split a leading sign/currency from a following digit, or a trailing
		// currency/unit from a preceding digit.
		R_UNIT[0] = new RetokenizerTwo("^(?i)(\\p{Punct}*"+signs+")(\\d)");
		R_UNIT[1] = new RetokenizerTwo("^(?i)(\\p{Punct}*"+currencies+")(\\d)");
		R_UNIT[2] = new RetokenizerTwo("(?i)(\\d)("+currencies+"\\p{Punct}*)$");
		R_UNIT[3] = new RetokenizerTwo("(?i)(\\d)("+units+"\\p{Punct}*)$");
	}
	
	/** Called by {@link EnglishTokenizer#getTokenList(String)}. */
	protected List<FlaggedToken> tokenizeWhiteSpaces(String str)
	{
		// Every maximal non-whitespace run becomes one unprotected token that
		// carries its character span within the original string.
		List<FlaggedToken> result = new ArrayList<FlaggedToken>();
		Matcher matcher = MPLib.getNonWhitespaceMatcher(str);
		
		while (matcher.find())
		{
			Token token = new Token(matcher.group(), matcher.start());
			LOG.trace("Found token '{}' at span {}", token, token.getSpan());
			result.add(new FlaggedToken(token, false));
		}
		
		LOG.trace("Whitespace tokenization done; now have {} tokens", result.size());
		return result;
	}
	
	/**
	 * Flags tokens whose text is a known emoticon as protected so later passes do not
	 * split them. Called by {@link EnglishTokenizer#getTokenList(String)}.
	 */
	protected void protectEmoticons(List<FlaggedToken> tokens)
	{
		for (FlaggedToken token : tokens)
		{
			// Bug fix: the original called T_EMOTICONS.contains(token.tok), comparing a
			// Token object against a Set<String>, which can never match. Compare the
			// token's text instead (as protectAbbreviations/protectFilenames do).
			if (T_EMOTICONS.contains(token.tok.getText()))
				token.flag = true;
		}
	}
	
	/** Called by {@link EnglishTokenizer#getTokenList(String)}. */
	protected void protectAbbreviations(List<FlaggedToken> tokens)
	{
		// Protect known abbreviations and alpha-period sequences (e.g. "u.s.")
		// from any further splitting.
		for (FlaggedToken flToken : tokens)
		{
			String lower = flToken.tok.getText().toLowerCase();
			boolean isAbbreviation = T_ABBREVIATIONS.contains(lower) || P_ABBREVIATION.matcher(lower).find();
			
			if (isAbbreviation)
				flToken.flag = true;
		}
	}
	
	/** Called by {@link EnglishTokenizer#getTokenList(String)}. */
	protected void protectFilenames(List<FlaggedToken> tokens)
	{
		// Protect tokens that look like filenames (recognized extensions) from splitting.
		for (FlaggedToken token : tokens)
		{
			String lower = token.tok.getText().toLowerCase();
			
			if (MPLib.FILE_EXTS.matcher(lower).find())
				token.flag = true;
		}
	}
	
	/**
	 * Applies the given retokenizer to every unprotected token in place: the first
	 * replacement overwrites the original slot and any further replacements are
	 * inserted right after it.
	 */
	protected void replaceProtects(List<FlaggedToken> tokens, Retokenizer retok)
	{
		for (int i = 0 ; i < tokens.size(); i++)
		{
			FlaggedToken flToken = tokens.get(i);
			
			if (flToken.flag)
				continue;
			
			List<FlaggedToken> replacements = retok.apply(flToken);
			
			// Robustness: apply() drops sub-tokens whose text is empty, so the list can
			// in principle come back empty; the original would then throw on get(0).
			if (replacements.isEmpty())
			{
				tokens.remove(i--);
				continue;
			}
			
			tokens.set(i, replacements.get(0)); // shortcut for common (only?) case
			// insert other replacements (probably not used) 
			if (replacements.size() > 1)
				tokens.addAll(i + 1, replacements.subList(1, replacements.size())); 
		}
	}
	
	/**
	 * Masks every hyphen (with {@link #S_HYPHEN}) inside unprotected tokens that match
	 * the hyphenated-word dictionary, so the hyphens survive punctuation splitting.
	 */
	protected void replaceHyphens(List<FlaggedToken> tokens)
	{
		for (FlaggedToken flToken : tokens)
		{
			if (flToken.flag)
				continue;
			
			String text = flToken.tok.getText();
			
			// The dictionary is matched case-insensitively via lower-casing, but the
			// replacement is applied to the original-case text.
			if (P_HYPHEN_LIST.matcher(text.toLowerCase()).find())
				flToken.tok.setText(P_HYPHEN.matcher(text).replaceAll(S_HYPHEN));
		}
	}
	
	/** Restores the given placeholder pattern to its original character in every token's text. */
	protected void recoverPatterns(List<FlaggedToken> tokens, Pattern p, String replacement)
	{
		for (FlaggedToken flToken : tokens)
		{
			String restored = p.matcher(flToken.tok.getText()).replaceAll(replacement);
			flToken.tok.setText(restored);
		}
	}

	/** @return {@code textWithSubs} with every occurrence of the placeholder pattern replaced. */
	protected String recoverPatterns(String textWithSubs, Pattern p, String replacement)
	{
		Matcher m = p.matcher(textWithSubs);
		return m.replaceAll(replacement);
	}
	
	/**
	 * Splits tokens that match a known compound (case-insensitively) into their parts,
	 * flagging each part as protected. Non-compound and already-protected tokens pass
	 * through unchanged.
	 */
	protected List<FlaggedToken> tokenizeCompounds(List<FlaggedToken> oTokens)
	{
		List<FlaggedToken> nTokens = new ArrayList<FlaggedToken>();
		int idx;
		
		for (FlaggedToken oToken : oTokens)
		{
			// M_COMPOUNDS stores 1-based ids (0 = miss), hence the "- 1" here.
			if (oToken.flag || (idx = M_COMPOUNDS.get(oToken.tok.getText().toLowerCase()) - 1) < 0)
				nTokens.add(oToken);
			else
			{
				for (IntIntPair p : L_COMPOUNDS.get(idx))
				{
					// NOTE(review): every part token is given the compound's begin offset,
					// not begin + p.i1 — confirm whether part spans should be shifted.
					Token newTok = new Token(oToken.tok.getText().substring(p.i1, p.i2), 
							oToken.tok.getSpan().begin);
					nTokens.add(new FlaggedToken(newTok, true));
				}
			}
		}
		
		return nTokens;
	}
	
	/**
	 * Applies the given retokenizer to every token, collecting the replacements into a
	 * new list. Called by {@link EnglishTokenizer#getTokenList(String)}.
	 */
	protected List<FlaggedToken> tokenizePatterns(List<FlaggedToken> oTokens, Retokenizer retok)
	{
		List<FlaggedToken> nTokens = new ArrayList<FlaggedToken>();
		LOG.trace("Applying tokenization pattern {}", retok.pattern.pattern());
		
		for (FlaggedToken oToken : oTokens)
			nTokens.addAll(retok.apply(oToken));
		
		// Bug fix: the original logged oTokens.size() — the size of the *input* list —
		// rather than the resulting token count.
		LOG.trace("There are now {} tokens", nTokens.size());
		return nTokens;
	}
	
	/**
	 * Base class for regex-driven retokenization: {@link #apply(FlaggedToken)} scans a
	 * token's text with {@link #pattern} and lets subclasses convert each match into
	 * replacement tokens via {@link #flaggedTokensFromMatcher(Matcher)}, while the
	 * unmatched stretches between matches are emitted as unprotected tokens with
	 * adjusted character spans.
	 */
	abstract class Retokenizer {
		// Compiled once per retokenizer; applied to every token's text.
		protected Pattern pattern;
		protected Retokenizer(String regex) 
		{
			pattern = Pattern.compile(regex);
		}

		public Retokenizer(Pattern pattern) 
		{
			this.pattern = pattern;
		}
		
		/**
		 * Splits the given token around the matches of {@link #pattern}. Tokens already
		 * flagged as protected are returned unchanged.
		 * @return the replacement tokens in left-to-right order (possibly the original itself).
		 */
		protected List<FlaggedToken> apply(FlaggedToken original) 
		{
			// XXX: fails on BioNLP-ST-2013_GE_devel_data_rev2/PMC-3333881-02-MATERIALS_AND_METHODS.txt.parsed
			// some char spans end up overlapping.
			List<FlaggedToken> newToks = new ArrayList<FlaggedToken>();
			LOG.trace("Checking token '{}'@{} ", original.tok, original.tok.getSpan());
			if (original.flag)
			{
				LOG.trace("Token '{}' is marked as processed; not regex-matching", original.tok);
				newToks.add(original);
				return newToks;
			}
			Token origTok = original.tok;
			Matcher matcher = pattern.matcher(origTok.getText());
			int lastMatchEnd = 0;
			// Running correction for length differences introduced by placeholder
			// substitutions (e.g. '-' -> '_HYYN_') in the text emitted so far.
			int adjustment = 0;
			int offset = origTok.getSpan().begin;
			while (matcher.find())
			{
				LOG.trace("Found match '{}' at span {}:{}", matcher.group(), matcher.start(), matcher.end());
				if (matcher.start() > lastMatchEnd)
				{
					// In certain cases, the remapping of chars e.g. '-' -> '_HYYN_' causes
					// inaccurate counts. We assume that the Retokenizer's pattern
					// *doesn't* have such characters to avoid this. 
					String precedingTokText = origTok.getText().substring(lastMatchEnd, matcher.start());
					int actualLength = getLengthWithoutSubs(precedingTokText); 
					Token precedingTok = new Token(precedingTokText, lastMatchEnd,
							lastMatchEnd + actualLength);
					adjustment += precedingTok.getText().length() - actualLength;
					LOG.trace("Original token is '{}'@{}", origTok, origTok.getSpan());
					// postAdjustment = adjustment + 
					//		origTok.getSpan().end - origTok.getSpan().begin - origTok.getText().length();
					//LOG.trace("Adjustments for new token are {} (before) and {} (after)",
					//		adjustment, postAdjustment);
					precedingTok.getSpan().addOffset(offset);
					LOG.trace("Inserting prefix '{}' of match at span {}", precedingTokText, precedingTok.getSpan());
					newToks.add(new FlaggedToken(precedingTok, false));
				}
				Collection<FlaggedToken> flToks = flaggedTokensFromMatcher(matcher);
				for (FlaggedToken flTok : flToks)
				{
					flTok.tok.getSpan().addOffset(offset + adjustment);
					LOG.trace("Adding replacement token '{}' at span {}", flTok.tok, flTok.tok.getSpan());
					// Sub-tokens with empty text (e.g. an empty capture group) are dropped.
					if (!flTok.tok.getText().isEmpty())
						newToks.add(flTok);
				}
				lastMatchEnd = matcher.end();
			}
			if (lastMatchEnd < original.tok.getText().length())
			{
				if (lastMatchEnd == 0)
				{
					// No match at all: keep the token as-is (preserving its flag and span).
					LOG.trace("Token '{}'@{} is unchanged", original.tok, original.tok.getSpan());
					newToks.add(original);
				}
				else
				{
					String trailingTokText = original.tok.getText().substring(lastMatchEnd);
					Token trailingTok = new Token(trailingTokText, 
							lastMatchEnd, lastMatchEnd + getLengthWithoutSubs(trailingTokText));
					trailingTok.getSpan().addOffset(offset + adjustment);
					LOG.trace("Inserting trailing text '{}' at span {}", trailingTokText, trailingTok.getSpan());
					newToks.add(new FlaggedToken(trailingTok, false));
				}
			}
			return newToks;
		}

		/** Converts the current match into replacement tokens; implemented by subclasses. */
		protected abstract Collection<FlaggedToken> flaggedTokensFromMatcher(
				Matcher matcher);
	}
	
	/** Retokenizer that emits each full regex match as a single protected token. */
	private class RetokenizerOne extends Retokenizer {

		protected RetokenizerOne(String pattern) {
			super(pattern);
		}

		public RetokenizerOne(Pattern pattern) {
			super(pattern);
		}

		@Override
		protected Collection<FlaggedToken> flaggedTokensFromMatcher(
				Matcher matcher) {
			Token match = new Token(matcher.group(), matcher.start());
			
			List<FlaggedToken> result = new ArrayList<FlaggedToken>();
			result.add(new FlaggedToken(match, true));
			return result;
		}
	}
	
	/** Retokenizer that splits a match into its first two capture groups, both left unprotected. */
	private class RetokenizerTwo extends Retokenizer {

		protected RetokenizerTwo(String pattern) {
			super(pattern);
		}

		@Override
		protected Collection<FlaggedToken> flaggedTokensFromMatcher(
				Matcher matcher) {
			List<FlaggedToken> result = new ArrayList<FlaggedToken>();
			
			result.add(new FlaggedToken(new Token(matcher.group(1), matcher.start(1)), false));
			result.add(new FlaggedToken(new Token(matcher.group(2), matcher.start(2)), false));
			
			return result;
		}
	}
	
//	private class SubstitutionOne implements Substitution
//	{
//		@Override
//		public void appendSubstitution(MatchResult match, TextBuffer dest)
//		{
//			dest.append(S_DELIM);
//			dest.append(S_PROTECTED);
//			dest.append(match.group(0));
//			dest.append(S_DELIM);
//		}
//	}
//	
//	private class SubstitutionTwo implements Substitution
//	{
//		@Override
//		public void appendSubstitution(MatchResult match, TextBuffer dest)
//		{
//			dest.append(match.group(1));
//			dest.append(S_DELIM);
//			dest.append(match.group(2));
//		}
//	}
//	
	/**
	 * Retokenizer for digit-punctuation-digit contexts: the inner punctuation (group 2)
	 * is replaced by an indexed placeholder ({@code S_D0D + index + "_"}) so that the
	 * generic punctuation passes leave the token intact; the placeholder is restored
	 * later by {@code recoverPatterns}.
	 */
	private class RetokenizerD0D extends Retokenizer 
	{
		protected RetokenizerD0D(String pattern) {
			super(pattern);
		}

		@Override
		protected Collection<FlaggedToken> flaggedTokensFromMatcher(
				Matcher matcher) {
			String masked = matcher.group(1)
					+ S_D0D + M_D0D.get(matcher.group(2)) + "_"
					+ matcher.group(3);
			
			List<FlaggedToken> result = new ArrayList<FlaggedToken>();
			result.add(new FlaggedToken(new Token(masked, matcher.start(1), matcher.end(3)), false));
			return result;
		}
	}
	
	/**
	 * Retokenizer that protects the first capture group and, when non-empty, emits the
	 * third group as a separate unprotected token (used to split trailing punctuation
	 * off abbreviations).
	 */
	private class RetokenizerOnePlus extends Retokenizer 
	{
		protected RetokenizerOnePlus(String pattern) {
			super(pattern);
		}

		@Override
		protected Collection<FlaggedToken> flaggedTokensFromMatcher(
				Matcher matcher) {
			List<FlaggedToken> result = new ArrayList<FlaggedToken>();
			result.add(new FlaggedToken(new Token(matcher.group(1), matcher.start(1)), true));
			
			String trailing = matcher.group(3);
			
			if (!trailing.isEmpty())
				result.add(new FlaggedToken(new Token(trailing, matcher.start(3)), false));
			
			return result;
		}
	}


    /**
	 * Returns the length the given text will have once every placeholder substitution
	 * has been recovered back to its original single character.
	 */
    protected int getLengthWithoutSubs(String textWithSubs) {
		// erg - what an inefficient way to do this, but refactoring
		// is too hard at this stage
		String restored = textWithSubs;
		int d0d = 0;
		
		for (Pattern p : P_RECOVER_D0D)
			restored = recoverPatterns(restored, p, A_D0D[d0d++]);
		
		restored = recoverPatterns(restored, P_RECOVER_HYPHEN, "-");
		restored = recoverPatterns(restored, P_RECOVER_APOSTROPHY, "'");
		restored = recoverPatterns(restored, P_RECOVER_AMPERSAND, "&");
		
		return restored.length();
	}

//	private class SubstitutionD0D implements Substitution
//	{
//		@Override
//		public void appendSubstitution(MatchResult match, TextBuffer dest)
//		{
//			dest.append(match.group(1));
//			dest.append(S_D0D+M_D0D.get(match.group(2))+"_");
//			dest.append(match.group(3));
//		}
//	}
//	
//	private class SubstitutionOnePlus implements Substitution
//	{
//		@Override
//		public void appendSubstitution(MatchResult match, TextBuffer dest)
//		{
//			dest.append(S_DELIM);
//			dest.append(S_PROTECTED);
//			dest.append(match.group(1));
//			dest.append(S_DELIM);
//			dest.append(match.group(3));
//		}
//	}
}

