package rerac.components;

import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import goalie.Component;

import rerac.feature.BreakLevel;
import rerac.protos.Corpus.Document;
import rerac.protos.Corpus.Document.Annotation;
import rerac.protos.Corpus.Document.Method;
import rerac.protos.Corpus.Document.Token;

/**
 * This reads in a tokenized and white space separated corpus, one sentence per
 * line, such as the one used by Doug Downey for relation extraction.
 * The output consists of the sentences of the corpus as Document messages,
 * each containing one sentence.
 * 
 * @author ben
 *
 */
public class ReadTokenizedText implements Component {
  public static final String REFERRED_NAME = "read_tokenized_text";

  // Pattern to recognize classes of wild-carded unknowns.
  static final Pattern UNK_PATTERN = Pattern.compile("unk[0-9]+");
  static final Pattern SPACE_PATTERN = Pattern.compile("\\s+");
  // The number of letters to be checked for equality of two sentences.
  static final int CHECKED_PREFIX_SIZE = 40;
  // Maximal number of unknown tokens per sentence.
  static final int MAX_UNKNOWNS = 4;

  /**
   * Returns a case-insensitive hash of the first {@link #CHECKED_PREFIX_SIZE}
   * characters of {@code s}. Used as a cheap fingerprint to drop duplicate
   * (and near-duplicate) sentences.
   */
  private static int approximateHash(String s) {
    return s.substring(0, Math.min(CHECKED_PREFIX_SIZE, s.length())).toLowerCase()
        .hashCode();
  }

  /**
   * Returns true if {@code s} contains at most {@link #MAX_UNKNOWNS} tokens
   * matching {@link #UNK_PATTERN} (wild-carded unknowns).
   */
  private static boolean valid(String s) {
    Matcher m = UNK_PATTERN.matcher(s);
    int matches = 0;
    while (m.find()) {
      ++matches;
    }
    return matches <= MAX_UNKNOWNS;
  }

  /** Deletes the output file produced by {@link #run}. Best-effort. */
  @Override
  public void cleanup(Map<String, String> outputs) throws IOException {
    File f = new File(outputs.get("output"));
    f.delete();
  }

  /**
   * Reads the whitespace-tokenized corpus named by {@code params.get("input")},
   * one sentence per line, and writes each unique, valid sentence as a
   * delimited Document message to the output file.
   *
   * @param params expects key "input"; optional key "output_destination"
   *     (defaults to input path + ".docpb")
   * @return a map with key "output" naming the file written
   * @throws IOException if reading the input or writing the output fails
   */
  @Override
  public Map<String, String> run(Map<String, String> params) throws IOException {
    String inputFN = params.get("input");
    String outputFN = params.containsKey("output_destination")
        ? params.get("output_destination") : inputFN + ".docpb";

    // Prefix hashes of sentences already emitted, to exclude duplicates
    // (and near duplicates sharing the same prefix).
    Set<Integer> prefixHashes = new HashSet<Integer>();
    // NOTE(review): FileReader/FileOutputStream use the platform default
    // charset; presumably the corpus is in that encoding -- confirm.
    BufferedReader br = new BufferedReader(new FileReader(inputFN));
    try {
      DataOutputStream output = new DataOutputStream(new BufferedOutputStream(
          new FileOutputStream(outputFN)));
      try {
        for (String line; (line = br.readLine()) != null; ) {
          // Hoisted: the original computed approximateHash(line) twice.
          int hash = approximateHash(line);
          if (prefixHashes.contains(hash) || !valid(line)) {
            continue;
          }
          prefixHashes.add(hash);
          Document.Builder doc = Document.newBuilder();
          // Register the break-level annotation method on this document and
          // remember its index for the per-token annotations below.
          int breakLevelIdx = doc.getMethodCount();
          Method method = Method.newBuilder().setId(BreakLevel.class.getName())
              .setType(BreakLevel.TYPE).build();
          doc.addMethod(method);
          for (String tokenStr : SPACE_PATTERN.split(line)) {
            Token.Builder tok = Token.newBuilder().setText(tokenStr);
            Annotation.Builder breakLevel = Annotation.newBuilder();
            breakLevel.setMethodIndex(breakLevelIdx);
            breakLevel.setType(BreakLevel.TYPE);
            // First token of the sentence gets break level 2; all others 1.
            breakLevel.setVal(doc.getTokenCount() > 0 ? 1 : 2);
            tok.addAnnotation(breakLevel.build());
            doc.addToken(tok.build());
          }
          doc.build().writeDelimitedTo(output);
        }
      } finally {
        // Original leaked the stream if an exception was thrown mid-loop;
        // closing here also flushes the buffered bytes.
        output.close();
      }
    } finally {
      br.close();
    }

    Map<String, String> outMap = new HashMap<String, String>();
    outMap.put("output", outputFN);
    return outMap;
  }
}
