package forecasting.converter.parser;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Scanner;

import forecasting.converter.FeatureType;
import forecasting.converter.DiscreteValue;
import forecasting.converter.RealValue;
import forecasting.converter.UnknownValue;


/**
 * Converts a tab-delimited dataset into the file format expected by the
 * random-forest-trees (rft) library: a header row of {@code name:type}
 * columns followed by the data rows.
 */
public class RandomForestDatasetParser extends AbstractDatasetParserTemplate {
	
	// NOTE(review): the "<library>" CLI argument was intended to toggle this
	// (weka = true, rft = false) but that code was commented out and could not
	// have compiled (it assigned a static final); the argument is currently ignored.
	private final static boolean SAVE_CATEGORIES_FOR_DISCRETE_VALUES = false;
	
	/**
	 * Column delimiter used both to split input rows and to write the output file.
	 */
	public static final String OUTPUT_FILE_DELIMITER = "\t";
	
	
	/**
	 * @param saveCategories whether discrete-value categories are recorded while
	 *        parsing; forwarded to the template superclass.
	 */
	public RandomForestDatasetParser(final boolean saveCategories) {
		super(saveCategories);
	}
	
	/**
	 * Validates the command-line argument count. Prints usage to stderr and
	 * terminates the JVM with a non-zero status when the count is wrong.
	 *
	 * @param args raw command-line arguments; exactly 3 are required
	 */
	public static void help(String args[]) {
		if(args.length != 3) {
			System.err.println("Error: expected 3 arguments but got " + args.length);
			System.err.println("Converter <file name> <output file name prefix> <library>\n" +
								"<library>: weka | rft");
			System.exit(-1);
		}
	}
	
	/**
	 * Entry point.
	 *
	 * @param args [0] input file name, [1] output file name prefix,
	 *        [2] library ("weka" | "rft") — currently unused, see
	 *        {@link #SAVE_CATEGORIES_FOR_DISCRETE_VALUES}
	 */
	public static void main(String[] args) {
		// validate input parameters (exits on failure)
		help(args);
		
		final String inputFileName = args[0];
		final String outputFileNamePrefix = args[1];
		final String outputFileName = getOutputFilename(outputFileNamePrefix);
		
		final RandomForestDatasetParser parser =
				new RandomForestDatasetParser(SAVE_CATEGORIES_FOR_DISCRETE_VALUES);
		parser.run(inputFileName, outputFileName);
		
		System.out.println("outputFileName = " + outputFileName);
		
		// FIXME: Turning off validation since it's already been tested, and we're now omitting info columns.
		//parser.validateNumColumns(outputFileName);
		System.out.println("Done!");
	}

	/** Derives the output file name from the prefix supplied on the command line. */
	private static String getOutputFilename(final String outputFileNamePrefix) {
		return outputFileNamePrefix + ".rft.txt";
	}
	
	/**
	 * {@inheritDoc}
	 *
	 * <p>Second pass over the input: writes the header row (feature names plus
	 * type tags) followed by every sample row, re-formatted for the random
	 * forest library.
	 */
	protected void createOutputFile(final ArrayList<FeatureType> labels, final String inputFileName, final String outputFileName) {
		// try-with-resources guarantees the writer and scanner are closed even
		// when conversion aborts (the original leaked both on the error paths).
		try (final PrintWriter outfile = new PrintWriter(outputFileName);
				final Scanner scan = openFileScanner(inputFileName)) {
			
			writeHeaderRow(labels, outfile, scan);
			
			// write the data rows
			int sampleNum = 0;
			while (scan.hasNext()) {
				++sampleNum;
				if (sampleNum % 10000 == 0) {
					System.out.println("Creating output file: Processing sample #" + sampleNum);
				}
				final String line = scan.nextLine();
				
				// An empty input row becomes a row of empty cells: one fewer
				// delimiter than there are columns, hence starting at i = 1.
				if (line.isEmpty()) {
					for (int i = 1; i < labels.size(); i++) {
						outfile.print(OUTPUT_FILE_DELIMITER);
					}
					outfile.println();
					continue;
				}
				
				// Drop the trailing delimiter before splitting; limit -1 keeps empty cells.
				final String featureVector[] = line.substring(0, line.length() - 1).split(OUTPUT_FILE_DELIMITER, -1);

				// validating that we're receiving the correct number of columns
				if (featureVector.length != EXPECTED_FEATURE_VECTOR_SIZE) {
					System.err.println("Unexpected number of columns for sample # " + sampleNum + ", column size = " + featureVector.length);
					System.exit(-1);
				}
				
				printFeatureVector(labels, outfile, featureVector);
			}
		} catch (FileNotFoundException e) {
			System.err.println("Could not create output file: " + outputFileName);
			e.printStackTrace();
		}
	}

	/**
	 * Writes the header row: each feature name suffixed with its type tag
	 * ({@code :factor} for discrete, {@code :numeric} for real, {@code :info}
	 * for unknown). Consumes the input header line, which was already parsed
	 * during the first pass.
	 *
	 * @param labels  feature metadata collected during the first pass
	 * @param outfile destination writer
	 * @param scan    input scanner, positioned at the header line
	 */
	private void writeHeaderRow(final ArrayList<FeatureType> labels,
			final PrintWriter outfile, final Scanner scan) {
		if (scan.hasNext()) {
			// Skip the input header line; it was processed in the first pass.
			scan.nextLine();
			
			if (labels.size() != EXPECTED_FEATURE_VECTOR_SIZE) {
				System.err.println("Unexpected number of features in the header");
				System.exit(-1);
			}
			
			boolean isFirst = true;
			for (int i = 0; i < labels.size(); i++) {
				final FeatureType attribute = labels.get(i);
				// No delimiter before the first column.
				if (!isFirst) {
					outfile.print(OUTPUT_FILE_DELIMITER);
				} else {
					isFirst = false;
				}
				
				if (!(attribute instanceof UnknownValue)) {
					final StringBuilder sb = new StringBuilder(attribute.getName());
					if (attribute instanceof DiscreteValue) {
						sb.append(":factor");
					} else if (attribute instanceof RealValue) {
						sb.append(":numeric");
					} else {
						System.err.println("Unexpected attribute type: " + attribute.getClass());
					}
					outfile.print(sb.toString());
				} else {
					// Unknown column type: tag it as info. Data rows leave
					// these cells empty (see printFeatureVector).
					outfile.print(attribute.getName() + ":info");
				}
			}
			outfile.println();
		}
	}

	/**
	 * Writes one data row. Real values are normalized via
	 * {@code RealValue.convertToDouble}; unknown-typed (info) columns are
	 * written as empty cells; everything else is copied through verbatim.
	 */
	private void printFeatureVector(ArrayList<FeatureType> labels,
			PrintWriter outfile, String[] featureVector) {
		boolean isFirst = true;
		for (int i = 0; i < labels.size(); i++) {
			if (labels.get(i) != null) {
				
				// No delimiter before the first printed column.
				if (!isFirst) {
					outfile.print(OUTPUT_FILE_DELIMITER);
				} else {
					isFirst = false;
				}				
				
				if (featureVector[i].isEmpty()) {
					// Missing value: leave the cell empty.
				} else if (labels.get(i) instanceof RealValue) {
					outfile.print(RealValue.convertToDouble(featureVector[i]));
				} else if (labels.get(i) instanceof UnknownValue) {
					// Unknown-typed (info) columns get an empty cell for now.
				} else {
					outfile.print(featureVector[i]);
				}
			}
		}
		outfile.println();
	}
	
	/**
	 * Sanity check: verifies that every data row of the produced file has the
	 * same number of columns as its header, exiting the JVM on the first
	 * mismatch. (Currently unreferenced — the call in main is commented out.)
	 *
	 * @param filename path of the output file to validate
	 */
	private void validateNumColumns(final String filename) {
		// try-with-resources: the original leaked this scanner.
		try (final Scanner scanner = openFileScanner(filename)) {
			if (!scanner.hasNext()) {
				System.err.println("File is empty: " + filename);
				return;
			}
			final int numColumns = scanner.nextLine().split(OUTPUT_FILE_DELIMITER, -1).length;
			System.out.println("Header has " + numColumns + " column(s)");
			
			int lineNum = 0;
			while (scanner.hasNext()) {
				final String line = scanner.nextLine();
				++lineNum;
				
				final int currentNumColumns = line.split(OUTPUT_FILE_DELIMITER, -1).length;
				
				if (currentNumColumns != numColumns) {
					System.err.println("Incorrect number of columns found on line #" + lineNum);
					System.exit(-1);
				}
			}
		}
	}
}
