import java.util.ArrayList;
import java.util.BitSet;

import java.util.*;

import java.io.*;

//The basic unit of the tree. Current implementation is still rather large.

//Best way to save further space is probably to define an AbstractSuffixNode

//then extend it so that nodes with high branch factor (>1 child nodes) can 

//be differentiated from nodes with a single outgoing branch. Nodes with a

//single outgoing branch constitute most of the nodes in the tree and are

//uninteresting for error correction purposes. Runs of them should be compacted

//in future versions.

class SuffixNode {

	// Maps a base character ('A','C','G','T') to its ordinal 0..3;
	// every other character maps to -1.
	private static int _base2int[] = new int[256];

	static {
		for (int i = 0; i < 256; i++) {
			_base2int[i] = -1;
		}
		_base2int['A'] = 0;
		_base2int['C'] = 1;
		_base2int['G'] = 2;
		_base2int['T'] = 3;
	}

	// the parent of this node
	public SuffixNode parent;

	// the children of this node: null for a terminal node, length 1 for a
	// node with a single outgoing branch, length 4 (indexed by ordinal base)
	// otherwise
	public SuffixNode[] children;

	// the label on the incoming branch
	private byte base;

	// the number of leaves in the subtree rooted at this node
	public int visits;

	// ratio has THREE (3) possible uses!
	// 1: It actually holds a ratio:
	//    Let min be the index such that children[i].visits is the minimum at this node
	//    Let max be the index such that children[i].visits is the maximum at this node
	//    ratio = max/min (note if only one child, min == max != 0, so we're safe).
	// 2: The read number if this node is a terminal (ie. children == null)
	// 3: If children.length == 1 then ratio contains the base (actually ordinal
	//    base: 0,1,2,3) of the only child.
	private float ratio;

	// Set by the main routine: gives the error-correction a clue which nodes
	// are erroneous (children visited fewer times than this are suspicious).
	public static int expectedVisitsPerNode;

	public SuffixNode(char base, SuffixNode parent) {
		this.parent = parent;
		this.base = (byte) base;
		this.visits = 0;
	}

	private int base2int(char base) {
		return _base2int[base];
	}

	// Returns the child reached over the branch labelled with base, or null.
	public SuffixNode getChildren(char base) {
		return getChildren(base2int(base));
	}

	// Returns the child for the given ordinal base (0..3), or null if there
	// is no outgoing branch with that label.
	public SuffixNode getChildren(int index) {
		if (children == null) {
			return null;
		}
		if (children.length == 1) {
			// single-branch node: ratio holds the ordinal base of the only
			// child (use #3 of the ratio field)
			if (((int) ratio) == index) {
				return children[0];
			}
			return null;
		}
		return children[index];
	}

	// Creates a child of this node labelled with base if necessary.
	// Updates the number of leaves in the subtree rooted at the child
	// of this node labelled with base.
	public void visitChildren(char base, int readNumber) {
		int k = base2int(base);
		if (isTerminal()) {
			// This node will soon have a child node so is no longer terminal.
			// Start with a compact single-slot array; ratio remembers the
			// ordinal base of that single child (use #3 of ratio).
			children = new SuffixNode[1];
			ratio = k;
			k = 0;
		} else if (children.length == 1) {
			if (((int) ratio) == k) {
				// the requested branch is the existing single branch
				k = 0;
			} else {
				// a second branch is needed: expand to the full 4-slot array,
				// moving the existing child to its proper ordinal slot
				SuffixNode tmp = children[0];
				children = new SuffixNode[4];
				children[(int) ratio] = tmp;
			}
		}

		SuffixNode node = children[k];
		if (node == null) {
			// there is currently no outgoing branch for base,
			// make a new one (with a child at the end of it)
			if (this instanceof RootNode) {
				// children of roots additionally record the reads passing them
				children[k] = new AlphaSuffixNode(base, this);
			} else {
				children[k] = new SuffixNode(base, this);
			}
			node = children[k];
		}

		// track number of leaves in the subtree rooted at the child
		// of this node which has outgoing branch labelled base
		if (this instanceof RootNode && node instanceof AlphaSuffixNode) {
			((AlphaSuffixNode) node).visit(readNumber);
		} else {
			node.visit();
		}
	}

	// visits the node during tree construction
	public void visit() {
		this.visits++;
	}

	// the label of the incoming branch as a character
	public char getBase() {
		return (char) this.base;
	}

	public boolean isTerminal() {
		return (children == null);
	}

	// Returns the cached ratio (see the field comment for its three uses);
	// a single-branch node always reports 1.
	public double getRatio() {
		if (children == null)
			return ratio;
		if (children.length == 1)
			return 1;
		return ratio;
	}

	// If not a terminal: return the max visits value at a child
	// divided by the min (if only one child min == max != 0).
	// Otherwise: return 1.
	public double calculateRatio() {
		if (children == null) {
			return 1;
		} else {
			if (children.length == 1) {
				return 1;
			}

			int min = 1000000;
			int max = 0;

			for (int i = 0; i < 4; i++) {
				if (children[i] != null) {
					int count = children[i].visits;
					if (count < min) {
						min = count;
					}
					if (count > max) {
						max = count;
					}
				}
			}

			// BUGFIX: cast before dividing. The original "max / min" was an
			// integer division which truncates (e.g. 3/2 -> 1), so callers
			// testing "ratio > 1.5" could miss genuinely skewed nodes.
			ratio = (float) max / min;
			return ratio;
		}
	}

	// aligns the subtree under this node with that of another (neighbourNode);
	// this routine is used to see if a correction of this.base to another base
	// makes sense. Returns true iff some path below this node also exists
	// below neighbourNode.
	public boolean alignSubtree(SuffixNode neighbourNode) {
		if (this.children == null) {
			// reached a leaf on this side: the whole path matched
			return true;
		}
		if (neighbourNode == null) {
			return false;
		}
		for (int i = 0; i < 4; i++) {
			SuffixNode nextNode = this.getChildren(i);
			if (nextNode != null) {
				SuffixNode nextNeighbour = neighbourNode.getChildren(i);
				if (nextNeighbour != null) {
					if (nextNode.alignSubtree(nextNeighbour)) {
						return true;
					}
				}
			}
		}
		return false;
	}

	// Like alignSubtree, but collects the actual matching strings (paths that
	// exist in both subtrees). Returns null if nothing matches.
	public Collection<String> alignSubtree2(SuffixNode neighbourNode) {

		if (this.children == null) {
			// leaf: the match is just this node's base
			HashSet<String> s = new HashSet<String>();
			s.add("" + (char) this.base);
			return s;
		}
		if (neighbourNode == null) {
			return null;
		}

		HashSet<String> matchingStrings = new HashSet<String>();
		for (int i = 0; i < 4; i++) {
			SuffixNode nextNode = this.getChildren(i);
			if (nextNode != null) {
				SuffixNode nextNeighbour = neighbourNode.getChildren(i);
				if (nextNeighbour != null) {
					Collection<String> c = nextNode.alignSubtree2(nextNeighbour);
					if (c != null) {
						// prepend this node's base to every match found below
						Iterator<String> elements = c.iterator();
						while (elements.hasNext()) {
							String s = elements.next();
							matchingStrings.add((char) this.base + s);
						}
					}
				}
			}
		}
		if (matchingStrings.size() > 0)
			return matchingStrings;
		else
			return null;
	}

	// Like alignSubtree2, but tolerates up to errorTolerance mismatching
	// branches along a path (each mismatch costs one unit of tolerance).
	// Returns the matching strings, or null if nothing matches within the
	// tolerance.
	public Collection<String> alignSubtree3(SuffixNode neighbourNode, int errorTolerance) {

		if (errorTolerance < 0)
			return null;

		if (this.children == null) {
			// leaf: the match is just this node's base
			HashSet<String> s = new HashSet<String>();
			s.add("" + (char) this.base);
			return s;
		}
		if (neighbourNode == null) {
			return null;
		}

		HashSet<String> matchingStrings = new HashSet<String>();
		for (int i = 0; i < 4; i++) {
			SuffixNode nextNode = this.getChildren(i);
			if (nextNode != null) {
				SuffixNode nextNeighbour;
				// try every neighbour branch; a differing branch label (i != j)
				// consumes one unit of the error tolerance
				for (int j = 0; j < 4; j++) {
					nextNeighbour = neighbourNode.getChildren(j);

					if (nextNeighbour != null) {

						Collection<String> c = nextNode.alignSubtree3(nextNeighbour
								, (i == j ? errorTolerance : errorTolerance - 1));
						if (c != null) {
							// prepend this node's base to every match found below
							Iterator<String> elements = c.iterator();
							while (elements.hasNext()) {
								String s = elements.next();
								matchingStrings.add((char) this.base + s);
							}
						}
					}
				}
			}
		}
		if (matchingStrings.size() > 0)
			return matchingStrings;
		else
			return null;
	}

	// for statistic purposes only: number of nodes in this subtree (incl. this)
	public int countNodes() {
		SuffixNode child = null;
		int subnodes = 0;
		for (int i = 0; i < 4; i++) {
			child = this.getChildren(i);
			if (child != null) {
				subnodes += child.countNodes();
			}
		}
		return subnodes + 1;
	}

}

/* AlphaSuffixNode is used for all nodes on the level right under the root nodes.
 * It provides an additional list of reads that pass the node, which is used to
 * identify the corresponding reads during error correction.
 */

class AlphaSuffixNode extends SuffixNode {

	// numbers of the reads that passed this node (capacity fixed at
	// expectedVisitsPerNode) and the next free slot in that array
	int[] readsPassingNode;
	int readIndex;

	public AlphaSuffixNode(char base, SuffixNode parent) {
		super(base, parent);
		this.readIndex = 0;
		this.readsPassingNode = new int[expectedVisitsPerNode];
	}

	// Besides raising the visit counter, an AlphaSuffixNode also records
	// which reads went into this branch - but only up to a fixed amount of
	// expectedVisitsPerNode reads; beyond that the whole branch is
	// considered correct and further reads are not tracked.
	public void visit(int readNumber) {
		super.visit();
		if (readIndex >= readsPassingNode.length) {
			// capacity exhausted - stop recording read numbers
			return;
		}
		readsPassingNode[readIndex++] = readNumber;
	}

}

/* Each RootNode is the root of a subtree.
 * A RootNode's children are AlphaSuffixNodes.
 * RootNode.key keeps track of the key which the root has in the hashtable in Subtree.
 */
class RootNode extends SuffixNode {

	// the key under which this root is stored in Subtree's hashtable
	String key;

	public RootNode(char base, String key) {
		// a root has no parent node
		super(base, null);
		this.key = key;
	}

}

// This is the main class for the algorithm: it builds the tree and performs error-correction.
// builds only the subtree rooted under "prefix" and only the levels passed as a parameter.
// This is the main class for the algorithm: it builds the tree and performs error-correction.
// Builds only the subtree rooted under "prefix" and only the levels passed as a parameter.
class Subtree {
	
	// histogram of the visit counts of the nodes right below the roots
	// (index = visits - 1; overflow is clamped into the last slot);
	// reported to the supervisor so it can auto-tune the error cutoff
	private int[] nodeCounts;
	
	// the prefix that a read has to contain before being taken into this subtree
	private String prefix;
	
	// the levels of the tree which are actually kept track of and later to be analyzed
	private int fromLevel, toLevel;
	
	// the entry point into all the subtrees at "fromLevel" in this subtree
	private Hashtable<String, RootNode> subSubTree;

	// The place where the correctedReads are marked.

	//public static BitSet correctedReads, identifiedReads;

	// maps an ordinal base (0..3) back to its character
	private char int2base[] = { 'A', 'C', 'G', 'T' };

	// The initial reads in the same order they are handed to us at

	// construction, which is the same order in which they appear on file.

	// private byte[][] reads;

	// Counts the number of nodes we actually suspected of an error.

	// Modified by examineNode or examineNodeWithTerms, whichever is

	// being used.

	// Constructor: records the subtree parameters; numReads is currently
	// only used by the commented-out BitSet bookkeeping.
	public Subtree(String prefix, int fromLevel, int toLevel, int numReads) {
		this.prefix = prefix;
		this.fromLevel = fromLevel;
		this.toLevel = toLevel;
		if (fromLevel < prefix.length()) {
			System.err
					.println("Cannot build subtree with such a short prefix!");
		}
		this.subSubTree = new Hashtable<String, RootNode>();
		// correctedReads = new BitSet(numReads);
		nodeCounts = new int[90];
	}


	// This is where the action happens: builds the subtree from all matching
	// reads (and their reverse complements), then walks the roots looking for
	// skewed branches and corrects the reads passing through them in place
	// (in Shrec.reads). Finally reports the node-count histogram to the
	// supervisor.
	public void buildAndProcess() {
		
		
		// all reads are scanned if they fit to our subtree
		for (int readNumber = 0; readNumber < Shrec.reads.length; readNumber++) {
			
			//get the current read
			byte[] read = Shrec.reads[readNumber];
			
			//We do the following twice: once for the read and then its revcomp
			for(int direction = 0;direction<2; direction ++){
			
				// analyse the read: try every start position that leaves room
				// for fromLevel characters
				for (int i = 0; i < Shrec.reads[readNumber].length - fromLevel; i++) {
					int j = 0;
					while (j < prefix.length()
							&& (char) read[i + j] == prefix.charAt(j)) {
						j++;
					}
					if (j == prefix.length()) {
						// found read which has a suffix matching with my prefix.

						// Extract the address for the hashtable from the read:
						// the characters between the prefix and fromLevel
						String address = new String();
						for (int k = j; k < fromLevel; k++) {
							address += (char) read[i + k];
						}

						// Get entry from hashtable
						SuffixNode node = subSubTree.get(address);
						if (node == null) {

							// SubSubtree doesn't exist yet -> create
							node = new RootNode((char) read[i + fromLevel], address);

							// visit the new root
							node.visit();
							subSubTree.put(address, (RootNode) node);

						} else {

							// entry already exists - visit subsubtree
							node.visit();
						}

						// now take the rest of the read and visit or create children under the rootNode;
						// read numbers are encoded as 2*readNumber (+1 for the
						// reverse-complement direction) so the direction can be
						// recovered later via readno % 2
						for (int k = i + fromLevel; (k < read.length)
						&& k < i + toLevel; k++) {

							// note: visitChildren creates a node if necessary.
							node.visitChildren((char) read[k]
							     , (direction==0?2*readNumber : 2*readNumber + 1));
							node = node.getChildren((char) read[k]);
						}

					}

				}
				// second pass runs over the reverse complement
				read = buildComplement(Shrec.reads[readNumber]);
			}

		}

		// Walk the tree and correct errors
		Enumeration<RootNode> roots = subSubTree.elements();

		RootNode root;

		while (roots.hasMoreElements()) {
			root = roots.nextElement();

			// ratio of most-visited to least-visited child of this root
			double ratio = root.calculateRatio();

			if (ratio > 1.5) {

				// A skew node may be following this root - examine children!

				AlphaSuffixNode node;

				ArrayList<AlphaSuffixNode> skewNodes = new ArrayList<AlphaSuffixNode>(
						0);
				ArrayList<AlphaSuffixNode> reliableNodes = new ArrayList<AlphaSuffixNode>(
						0);

				// sort children into reliable or suspicious nodes, and record
				// their visit counts in the histogram
				for (int i = 0; i < 4; i++) {
					node = (AlphaSuffixNode) root.getChildren(i);
					if (node != null) {
						//<node stats>
						if (node.visits > nodeCounts.length)
							nodeCounts[nodeCounts.length-1]++;
						else nodeCounts[node.visits-1]++;
						//</node stats>
						if (node.visits < SuffixNode.expectedVisitsPerNode) {
							skewNodes.add(node);
						} else {
							reliableNodes.add(node);
						}
					}
				}

				// analyse children...
				for (AlphaSuffixNode skew : skewNodes) {
					for (AlphaSuffixNode reliable : reliableNodes) {
						
						Collection<String> matchingStrings;
						
						// align any suspicious node with all the reliable ones
						// and look for matches (tolerating one extra mismatch)
						
							matchingStrings = skew.alignSubtree3(reliable,1);
						
						 
						if (matchingStrings != null) {

							// match found - identify the reads and correct them
							
							int readNumber;
							
							// change all reads that pass the skew-node
							for (int i = 0; i < skew.readIndex; i++) {
								
								// NOTE(review): reliable can never be null here
								// (it comes from reliableNodes) - defensive leftover?
								if (reliable == null)
									continue;
								
								// get the read; odd read numbers refer to the
								// reverse complement (see the encoding above)
								int readno = skew.readsPassingNode[i];
								String read = new String(readno % 2 == 0?
										Shrec.reads[readno/2] :
										buildComplement(Shrec.reads[readno/2]));
								
								// identify actual position of the error in the read
								int errPos = read.indexOf(prefix+root.key+skew.getBase());
								
								if(errPos < 0)
									continue; // someone messed with the read in the meantime!?
								else 
									errPos += prefix.length() + root.key.length();
								
								// check whether the read's continuation after the
								// error position matches one of the aligned strings
								
								Iterator<String> possibleMatches = matchingStrings.iterator();
								boolean matchFound = false;
								
								while (possibleMatches.hasNext()){
									
									String s = possibleMatches.next();
									
									if(errPos + s.length() > read.length()){
										
										// match string overhangs the read: compare
										// only the part of s covered by the read
										String endOfRead = read.substring(errPos);
										
										if(s.startsWith(endOfRead)){
											matchFound = true;
											break;
										}
										
									} else if(read.subSequence(errPos, errPos + s.length())
											.equals(s)) {
										matchFound = true;
										break;
										
									}
									
								}
								
								// halve readNumber to match with the readNumber in the input-file.
								readNumber = skew.readsPassingNode[i] / 2;
								
								if (matchFound){

									// correct the read: replace the erroneous base
									// with the reliable branch's base
									char correctToBase = reliable.getBase();
									try {
										read = read.substring(0, errPos)
											+ correctToBase
											+ read.substring(errPos + 1);
									} catch (Exception e) {
										// NOTE(review): exception silently swallowed -
										// the read is then written back uncorrected
									}
								
									// write the (possibly corrected) read back,
									// re-complementing if it was a revcomp match
								
									if(readno % 2 == 0){
										Shrec.reads[readNumber] = read.getBytes();
									} else {
										Shrec.reads[readNumber] = buildComplement(read.getBytes());
										
									}
								}
								
								else {
									
									// no correction found!
									
									//identifiedReads.set(readNumber);
								}

							}
							// stop after the first reliable node that yielded matches
							break;
						} else {
							// halve readNumber to match with the readNumber in the input-file.
							//int readNumber = skew.readsPassingNode[i] / 2;
							//identifiedReads.set(readNumber);
							
						}
					}
				}
				
			// no corrections made - still record the node statistics of the children
			} else {
				AlphaSuffixNode node;

				for (int i = 0; i < 4; i++) {

					node = (AlphaSuffixNode) root.getChildren(i);
					
					if(node != null) {
						//<node stats>
						if (node.visits > nodeCounts.length)
							nodeCounts[nodeCounts.length-1]++;
						else nodeCounts[node.visits-1]++;
						//</node stats>
					}

//					if (node != null
//							&& node.visits < SuffixNode.expectedVisitsPerNode) {
//
//						int readNumber;
//
//						for (int k = 0; k < node.readIndex; k++) {
//
//							readNumber = node.readsPassingNode[k] / 2;
//
//							//identifiedReads.set(readNumber);
//
//						}
//
//					}

				}
			}


		}

		// Tree analysis finished: report to the supervisor
		Shrec.supervisor.processFinished(nodeCounts);
	}

	
	// Returns the reverse complement of the given sequence. Exits the
	// program if the sequence contains a character other than A/C/G/T.
	public static byte[] buildComplement(byte[] seq) {

		int len = seq.length;

		byte[] tba = new byte[len];

		for (int i = 0; i < len; i++) {

			// walk the input backwards, complementing each base
			switch (seq[len - i - 1]) {

			case 'A':
				tba[i] = 'T';
				break;

			case 'C':
				tba[i] = 'G';
				break;

			case 'G':
				tba[i] = 'C';
				break;

			case 'T':
				tba[i] = 'A';
				break;

			default:
				System.out.println("Invalid Character" + (new String(seq)));

				System.exit(1);

			}

		}

		return tba;

	}

}


// this class is instantiated by the supervisor to start calculation and analysis of a subtree

// Worker thread owning a single Subtree; started by the supervisor to run
// the build-and-correct pass for one prefix.
class CalculateSubtreeProcess extends Thread {

	Subtree subtree;

	public CalculateSubtreeProcess(String prefix, int fromLevel, int toLevel,
			int numReads) {
		subtree = new Subtree(prefix, fromLevel, toLevel, numReads);
	}

	public void run() {
		// run the error-correction for this thread's subtree
		this.subtree.buildAndProcess();
	}

}

/*
 * controls the threads which are working on different parts of the tree:
 * threadsToLaunch threads are started simultaneously each with a different
 * prefix. Waits till threads report to have finished until no prefixes are
 * left. Then starts another round (y-loop) to correct reads with more than one
 * error. TODO: find an estimation for subtree-size based on the prefix:
 * Prefixes with lots of Cs and Gs are likely to result in small subtrees.
 */
class Supervisor extends Thread {
	
	// node counts observed in the subtries - used to auto-evaluate the cutoff;
	// merged from all worker threads in processFinished()
	private int[] nodeCounts;

	// number of iterations the algorithm is to be run
	public static int numberOfIterations = 3;
	
	// the filenames for the textual output of reads
	public static String correctReadsFilename = "/tmp/jasc/correctreads.fas"
		, discardedReadsFilename = "/tmp/jasc/discardedreads.fas";
	
	// the parameters fromLevel and toLevel indicate which parts of the tree are to be built
	public static int fromLevel = 20, toLevel = 24;
	
	// the number of threads to be launched in parallel - adjust to system
	public static int threadsToLaunch = 8;
	
	// number of worker threads currently running; guarded by "this"
	private int threadsLaunched = 0;
	
	// the depth at which the suffix trie is to be partitioned - parting it
	// deep down helps saving memory, because the portions are smaller.
	public static int trieDepth = 3;

	// Iterator to generate the prefixes for each subtree - can be exchanged
	// with any smarter routine
	private StringIterator strings;

	public Supervisor() {
		strings = new StringIterator(trieDepth);
		nodeCounts = null;
	}

	// Locates the valley of the node-count histogram between the (assumed)
	// error peak at low coverage and the main coverage peak further right.
	// Returns the index of that local minimum, or -1 if none can be
	// established (valley coincides with the second peak).
	private int findLocalMinimumInNodeCounts() {
		// assume counts[1] is the highest peak;
		// give it a bit of space in case that first peak is wide
		// -> find the second peak and the valley in between
		int localMaxIndex = 5;
		for (int i = 6; i < nodeCounts.length; i++) {
			if (nodeCounts[i] > nodeCounts[localMaxIndex])
				localMaxIndex = i;
		}

		int localMinIndex = 2;
		for (int i = 3; i <= localMaxIndex; i++) {
			if (nodeCounts[i] < nodeCounts[localMinIndex])
				localMinIndex = i;
		}
		if (localMinIndex == localMaxIndex)
			return -1;
		return localMinIndex;
	}

	// Fraction of all counted nodes that lie at or above the separator index
	// (i.e. the share of nodes that would survive a cutoff at "separator").
	private double calculateRatioOfNodes(int separator) {
		int sum1 = 0, sum2 = 0;

		for (int i = 0; i < separator; i++)
			sum1 += nodeCounts[i];
		for (int i = separator; i < nodeCounts.length; i++)
			sum2 += nodeCounts[i];

		return (double) sum2 / (sum1 + sum2);
	}

	public void run() {
		
		// main loop: each iteration stands for a whole execution of the algorithm
		for (int y = 0; y < numberOfIterations; y++) {
			
			int totalNumberOfThreadsToFinish = (int) Math.pow(4, trieDepth);
			int totalNumberOfThreadsLaunched = 0;
			
			// create the worker threads and run them, keeping at most
			// threadsToLaunch running at any one time
			while (strings.hasNext()) {
				
				CalculateSubtreeProcess p = new CalculateSubtreeProcess(strings.next(), fromLevel, toLevel,
						Shrec.readcount);
				p.start();
				synchronized (this) {
					threadsLaunched++;
					totalNumberOfThreadsLaunched++;
					// coarse progress reporting at the 25/50/75% marks
					if (totalNumberOfThreadsLaunched * 4 == totalNumberOfThreadsToFinish) {
						System.out.println("25% of the round done...");
					} else if (totalNumberOfThreadsLaunched * 2 == totalNumberOfThreadsToFinish)
						System.out.println("50% of the round done...");
					else if ((totalNumberOfThreadsLaunched * 4) / 3 == totalNumberOfThreadsToFinish)
						System.out.println("75% of the round done...");
				}

				if (threadsLaunched >= threadsToLaunch) {
					
					// maximum number of threads running -> suspend supervisor
					// until processFinished() notifies us
					synchronized (this) {
						try {
							wait();
						} catch (Exception e) {
							e.printStackTrace();
						}
					}

				}
			}
			
			// Main part of the loop is done: all prefixes are handled.
			// Wait for the threads still running to finish.
			// NOTE(review): waiting only while threadsLaunched > 1 lets the
			// last worker keep running into the next round - presumably this
			// should be > 0; confirm before changing.
			while (threadsLaunched > 1) {
				synchronized (this) {
					if (threadsLaunched > 1)
						try {
							wait();
						} catch (Exception e) {
							e.printStackTrace();
						}
				}
			}
			
			// prepare fresh prefixes for the next iteration
			strings = new StringIterator(trieDepth);

			System.out.println("Finished Round " + (y + 1));
			
			if (y + 1 == numberOfIterations)
				break;
			
			System.out.println("Node Counts histogram of this round - the y-axis is in log scale. The x-axis shows the current cutoff value with vertical bars");
			System.out.println(HistogramGenerator.printAsciiHistogram(nodeCounts, SuffixNode.expectedVisitsPerNode));
			
			// auto-tune the cutoff from the accumulated histogram
			int localMin = findLocalMinimumInNodeCounts();
			if (localMin < 0) {
				System.out.println("No conclusive local minimum of node coverage could be established."
						+" Using default of 5 instead. Please specify manually, if other value desired");
				localMin = 5;
			}
			// BUGFIX: the original compared against the integer expression
			// (1/6), which evaluates to 0, so this sanity check could never
			// fire; 1.0/6 restores the intended "at most ~1/6 of nodes may
			// be cut off" rule.
			else if (calculateRatioOfNodes(localMin) < (1.0 / 6)) {
				System.out.println("Estimated cutoff ("+localMin+") cuts off too many nodes!");
				System.out.println("Using default of 5 instead. Please specify manually, if other value desired");
				localMin = 5;
			}
			
			if (localMin + 1 != SuffixNode.expectedVisitsPerNode)
				System.out.println("Re-adjusting cutoff to " + (localMin + 1));
			else
				System.out.println("Leaving cutoff as before.");
			SuffixNode.expectedVisitsPerNode = localMin + 1;
		}

	}

	// Called by each Subtree when it is done: merges the subtree's node-count
	// histogram into the global one, decrements the running-thread counter
	// and wakes the supervisor.
	public synchronized void processFinished(int[] counts) {
		if (nodeCounts == null)
			nodeCounts = counts;
		else {
			for (int i = 0; i < counts.length; i++)
				nodeCounts[i] += counts[i];
		}
		this.threadsLaunched--;
		this.notify();
	}


}

/* the Shrec class contains the main routine, getting everything started, and 
 * some (global :() variables.
 */
public class Shrec {

	// the reads from the input file
	public static byte[][] reads;
	
	// the number of reads in input-file.
	public static int readcount;
	
	// for statistical purposes: we provide our input files with information about errors,
	// so the algorithm can check its performance.
	// annotated files: 
	//public static BitSet erroneusReads;
	
	// the object controlling the algorithm
	public static Supervisor supervisor;

	
	// Counts the number of usable read lines in the input file.
	// Any line containing 'N' subtracts 3 instead of adding 1, so that for
	// fastq (4 lines per record: comment, sequence, '+', quality) a record
	// whose sequence contains 'N' nets to zero counted lines.
	// NOTE(review): this assumes 'N' only occurs in sequence lines - an 'N'
	// in a comment or quality line would skew the count; confirm for the
	// expected input files. A missing trailing newline also drops the last line.
	private static int countLines(String filename) throws IOException {
	    InputStream is = new BufferedInputStream(new FileInputStream(filename));
	    try {
	        byte[] c = new byte[1024];
	        int count = 0;
	        int readChars = 0;
	        boolean dumpLine = false;
	        while ((readChars = is.read(c)) != -1) {
	            for (int i = 0; i < readChars; ++i) {
	                if (c[i] == '\n'){
	                	if(dumpLine)
	                		count -= 3;
	                	else 
	                		++count;
	                	dumpLine = false;
	                } else if(c[i]=='N')
	                	dumpLine = true;
	            }
	        }
	        return count;
	    } finally {
	        // BUGFIX: the original never closed the stream, leaking a file
	        // handle per call; close it on every exit path.
	        is.close();
	    }
	}
	
	// the main routine handles parsing the input file and starting the supervisor.
	public static void main(String[] args) throws Exception {
		long start, end;

		start = System.currentTimeMillis();
		
		if(args.length<2){
			System.out.println("Usage: Shrec [options] <input reads> <corrected reads output>");
			System.out.println("Options:");
			System.out.println(	"\t-i #n: number of iterations to run (more for longer reads, which tend to have more than one error each (default 3)\n"+
								"\t-l #f #t: specify the levels to check in the suffix trie (default 20 to 24)\n"+
								"\t-c #n: cutoff value - specify the threshold of node counts for an error (default auto -- Shrec starts with a default of 3, and then refines after the first iteration)\n"+
								"\t-d #n: parallelization depth. specify the depth in which the sufix trie is to be divided (higher values for machines with small memory - default 3)\n"+
								"\t-f x: specify the input file format (fasta, fastq - default fastq)\n"+
								"\t-p #n: number of threads to run simultaneously (default 2)");
			System.exit(0);
		}
		
		int parameterIndex =0;
		int cutoff = 2;
		String fileFormat = "fastq";
		while(true){
			String option = args[parameterIndex];
			if(option.startsWith("-")){
				switch(option.charAt(1)){
				case 'i': 
					int iters = Integer.parseInt(args[parameterIndex + 1]);
					Supervisor.numberOfIterations = iters;
					System.out.println("Number of iterations set to "+iters);
					parameterIndex += 2;
					break;
				case 'l': 	Supervisor.fromLevel = Integer.parseInt(args[parameterIndex +1]);
							Supervisor.toLevel =   Integer.parseInt(args[parameterIndex +2]);
							parameterIndex += 3;
							System.out.printf("Investigating the suffix trie on levels %d to %d\n",Supervisor.fromLevel,Supervisor.toLevel);
							break;
				case 'c':
					if(args[parameterIndex +1].equals("auto"))
						System.out.println("Cutoff evaluated automatically.");
					else{
						cutoff = Integer.parseInt(args[parameterIndex +1]);
						System.out.println("Error cutoff set to "+cutoff);
					}
					parameterIndex += 2;
					break;
				case 'd': Supervisor.trieDepth = Integer.parseInt(args[parameterIndex + 1]);
					parameterIndex += 2;
					System.out.println("Seperating trie down to a depth of "+Supervisor.trieDepth);
					break;
				case 'f': fileFormat = args[parameterIndex + 1];
					parameterIndex += 2;
					System.out.println("Input file format set to "+fileFormat);
					break;
				case 'p':
					int threads = Integer.parseInt(args[parameterIndex +1]);
					Supervisor.threadsToLaunch = threads;
					System.out.println("Number of threads to run in parallel set to "+threads);
					parameterIndex += 2;
					break;
				default: System.err.println("Unknown option! Check usage.");
				 	System.exit(0);
				}
			} else{
				break;
			}
		}
		
		if(parameterIndex +1 >= args.length){
			System.err.println("Missing arguments! Check usage.");
			System.exit(0);
		}
		
		// set up the output file - to make sure, the paths actually make sense...
		
		PrintWriter outputCorrected = new PrintWriter(
				args[parameterIndex + 1]);
		
		// start parsing the input file
		
		int numberOfLinesInFile = countLines(args[parameterIndex]);
		
		if(fileFormat.equals("fasta")){
			reads = new byte[numberOfLinesInFile / 2][];
		} else if (fileFormat.equals("fastq")){
			reads = new byte[numberOfLinesInFile / 4][];
		} else {
			System.err.println("Unknown File Format!");
			System.exit(0);
		}
		
		System.out.println(reads.length + " Reads found in input file.");

		FileReader f = new FileReader(args[parameterIndex]);

		BufferedReader b = new BufferedReader(f);

		String line = b.readLine(); // fasta/fastq comment

		Shrec.readcount = 0;

		// annotated files: 
		int moreThanOneError = 0;

		// TODO: As we are assuming all reads are the same length we should have

		// a trimming step somewhere here after the reads have been loaded.

		System.out.println("Parsing input file...");

		String read=null;

		while (line != null) {

			if (fileFormat.startsWith("fast") ) { //fastq or fasta

//				if(fileFormat.endsWith("A")){ //fastqA or fastaA (annotated)
//					if (!(line.contains("Errors:0") || line.contains(",0"))) {
//
//						Shrec.erroneusReads.set(readcount);
//
//						if (!(line.contains("Errors:1") || line.contains(",1"))) {
//
//							moreThanOneError++;
// 	
//						}
//					}
//				}
				
				
				read = b.readLine(); //read sequence
				
//			} else if(fileFormat.equals("soapmap")){
//				
//				SoapRead r = new SoapRead(line);
//				read = new String(r.getReadSequence());
//				
			} else {
				System.err.println("Unkown file format! Check usage.");
				System.exit(0);
			}
			
			if(!read.contains("N")){

				byte[] bytes = read.getBytes();

				reads[readcount++] = bytes;
			
			}
			
			
			if(fileFormat.equals("fastq")){
				b.readLine();
				b.readLine();
				//skip the quality score part
			}

			line = b.readLine(); // read fasta/fastq comment
			

//			readcount++;

		}



		SuffixNode.expectedVisitsPerNode = cutoff;

		// Get on with building and processing the tree

		//Subtree.correctedReads = new BitSet(readcount);
		//Subtree.identifiedReads = new BitSet(readcount);
		
		
		System.out.println("All set; starting error correction.");

		Shrec.supervisor = new Supervisor();
		supervisor.start();
		supervisor.join();
		
		System.out.println("Error correction finished; writing output files");
		
		//Subtree trie = new Subtree("CGGGG",14,18,0);
		
		//trie.buildAndProcess();

		// all that follows is statistics that work with the annotated input files
		// they give information about error-correction rate and false positives
		// annotated files: 
//		int falseIdentified = 0, falseCorrected = 0;
//
//		
//		if(annotatedFiles){
//			
//			for (int i = Subtree.identifiedReads.nextSetBit(0); i >= 0; i = Subtree.identifiedReads		
//			.nextSetBit(i + 1)) {
//	
//				if (!erroneusReads.get(i)) {
//	
//					falseIdentified++;
//					
//					if (Subtree.correctedReads.get(i)) {	
//						falseCorrected++;
//					}
//				}
//			}
//		}
//
//		int numCorrected = 0;
//		
//		falseIdentified -= falseCorrected;
//
//		numCorrected = Subtree.correctedReads.cardinality();
//
//		// annotated files: 
//		int truePositives = (numCorrected - falseCorrected);
//
//		// annotated files: 
//		int numReadsWithErrors = erroneusReads.cardinality();
//
//		int numIdentified = Subtree.identifiedReads.cardinality();
//
//		// annotated files: 
//		int trueIdentified = numIdentified - falseIdentified;
//
//		System.out.println(numCorrected + " reads corrected.");
//
//		System.out.println(numIdentified + " reads (additionally) identified.");
//
//		if(annotatedFiles){
//			System.out.println("false positives: " + falseCorrected
//			// annotated files: 		
//					+ " (corrected), " + falseIdentified + " (identified).");
//	
//			// annotated files: 
//			System.out.println(truePositives + " of " + numReadsWithErrors
//			// annotated files: 		
//					+ " true positives.");
//	
//			// annotated files: 
//			System.out.println(trueIdentified + " of " + numReadsWithErrors
//			// annotated files: 		
//					+ " correctly identified.");
//	
//			// annotated files: 
//			System.out.println("Reads with more than one error: "
//			// annotated files: 		
//					+ moreThanOneError);
//		}
		

		// the algorithm is done by now - write the results to the output file(s)
		
		
			
		f = new FileReader(args[parameterIndex]);

		b = new BufferedReader(f);

		//FileWriter w = new FileWriter("../notCorrected.txt");

		for(int i=0;i<reads.length;i++){

			String comment = b.readLine();
			String commentString = new String(comment);
			read = b.readLine();

//			if(annotatedFiles){
//			
//				int index = comment.indexOf("Errors:") + 7;
//	
//				int strand = Integer.parseInt(""+comment.charAt(comment.indexOf("Strand:") + 7));
//	
//				String errstring;
//	
//				if( comment.length() > index+1 && comment.charAt(index + 1) != ' ') 
//				{ errstring = comment.substring(index,index+2); }
//				else {errstring = ""+comment.charAt(index);}
//	
//	
//	
//				int numberoferrs = Integer.parseInt(errstring);
//	
//				errors += numberoferrs;
//	
//				index += 3;
//	
//				if(numberoferrs > 9)index++;
//	
//				int corrected = 0;
//	
//				for(int j=0;j<numberoferrs;j++){
//	
//					comment = comment.substring(index);
//	
//					int comma = comment.indexOf(',');
//	
//					int errpos = Integer.parseInt(comment.substring(0,comma));
//	
//					char base = comment.charAt(comma + 1);
//	
//
//					byte[] readSequence = reads[i];
//	
//					if(readSequence[errpos] == (byte)base){
//						correctedErrors++;
//						corrected++;
//					} else {
//	
//						//w.write(i+","+strand+", ("+errpos+","+base+") ");
//	
//					}
//	
//					index = comma + 6;
//	
//	
//				}
//				
//				
//				if(numberoferrs == 0){
//					
//					if(! Subtree.identifiedReads.get(i) && ! Subtree.correctedReads.get(i))
//						
//						completelyCorrectedReads++;
//					
//					else {
//						
//						int fps = countFalsePositives(read.getBytes(), reads[i]);
//						
//						falsePositives += fps;
//						
//						if(fps == 0)
//						
//							completelyCorrectedReads++;
//						
//					}
//				} else {
//					
//					int fps = countFalsePositives(read.getBytes(), reads[i]) - corrected;
//					
//					//TODO: kick me out
//					if(fps < 0 )
//						System.err.println("Hang on!");
//					
//					int fns = numberoferrs - corrected;
//					
//					falsePositives += fps;
//					falseNegatives += fns;
//					
//					if(fps == 0 && fns == 0)
//						
//						completelyCorrectedReads++;
//					
//					else  if(corrected > 0)
//						
//						partiallyCorrectedReads++;
//					
//				}
//	
//			}

//			if(!Subtree.identifiedReads.get(i) ){
//				if(Subtree.correctedReads.get(i))
//					commentString += " (corrected)";
//				outputCorrected.println(commentString);
//				outputCorrected.println(new String(reads[i]));
//				if(fileFormat.equals("fastq")){
//					outputCorrected.println(b.readLine());
//					outputCorrected.println(b.readLine());
//				}
//			} else {
//				if(Subtree.correctedReads.get(i)){
//					commentString+= " (corrected) + (identified)";
//					outputCorrected.println(commentString);
//					outputCorrected.println(new String(reads[i]));
//					if(fileFormat.equals("fastq")){
//						outputCorrected.println(b.readLine());
//						outputCorrected.println(b.readLine());
//					}
//				} else {
//					outputDiscarded.println(comment);
//					outputDiscarded.println(new String(reads[i]));
//					if(fileFormat.equals("fastq")){
//						outputDiscarded.println(b.readLine());
//						outputDiscarded.println(b.readLine());
//					}
//				}
//				
//			}
			
	
			outputCorrected.println(commentString);
			outputCorrected.println(new String(reads[i]));
			if(fileFormat.equals("fastq")){
				outputCorrected.println(b.readLine());
				outputCorrected.println(b.readLine());
			}

		}
		
		outputCorrected.flush();
		outputCorrected.close();

		//w.flush();
//		if(annotatedFiles){
//			System.out.println("CORRECTED ERRORS IN TOTAL: "+correctedErrors+" OF "+errors);
//			System.out.println("CORRECTED READS IN TOTAL: "+completelyCorrectedReads+" + "+partiallyCorrectedReads);
//			System.out.println("FALSE POSITIVES: "+falsePositives+"; FALSE NEGATIVES: "+falseNegatives);
//		} else{
//			System.out.println("Corrected reads in total: "+Subtree.correctedReads.cardinality());
//			System.out.println("Additionally, discarded or partially corrected reads: "+Subtree.identifiedReads.cardinality());
//		}
		
		 System.out.println("Error Correction Complete");

		end = System.currentTimeMillis();
		double seconds = ((double) (end - start)) / 1000;

		System.out.println("Calculation took " + seconds + " seconds...");
	}

}
