import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
import java.util.TreeSet;


// 6111 Advanced Databases
// Project 3
// David Suess - dcs2136@columbia.edu
// Find large datasets in a csv file and association rules 
class DataMine {
	
	// Minimum support and confidence thresholds, both real numbers in [0, 1],
	// parsed from the command line (see processArguments()).
	Float  min_sup;
	Float  min_conf;
	
	// the csv dataset filename (args[0])
	String filename = "";
	// all itemsets whose support exceeds min_sup, sorted descending by support
	TreeSet<ItemType> large_itemsets;
	// support previously computed for each candidate itemset; used as a
	// lookup table when computing rule confidence in computeConfidence()
	HashMap<TreeSet<String>, Float> all_supports;
	// association rules that exceed min_conf, sorted descending by confidence
	TreeSet<ConfItemType> assoc_rules;
	// writer for the output.txt results log (see initializeLog()/log()/closeLog())
	BufferedWriter logbuffer;
	
	// for hierarchy processing
	// the key is an item, in the linked list are all generations of parents of that item
	HashMap<String, LinkedList<String>> parents_of_item;
	// the key is a category (or parent if you will) the values are a set of all children of that parent
	HashMap<String, HashSet<String>> children_of_category;

	// Orders ItemType objects for a TreeSet: descending by support value, then
	// lexicographically by the item words, so that two DISTINCT itemsets are
	// never reported as equal (a TreeSet silently drops elements that compare 0).
	class ItemTypeSupportComparator implements Comparator<ItemType> {
		public int compare(ItemType a, ItemType b) {
			// Empty/null itemsets sort before everything else.
			boolean aEmpty = (a == null) || (a.data == null) || (a.data.isEmpty());
			boolean bEmpty = (b == null) || (b.data == null) || (b.data.isEmpty());
			if (aEmpty && bEmpty) {
				return 0;
			} else if (aEmpty) {
				return -1;
			} else if (bEmpty) {
				return 1;
			}

			// Higher support sorts first (descending order).
			if (a.support < b.support) {
				return 1;
			} else if (a.support > b.support) {
				return -1;
			}

			// Equal support: compare the sorted item words element by element.
			Iterator<String> biter = b.data.iterator();
			for (String s1 : a.data) {
				if (!biter.hasNext()) {
					return -1;  // a has more elements than b, so a sorts first
				}
				int compareval = s1.compareTo(biter.next());
				if (compareval != 0) {
					return compareval;
				}
			}
			// BUG FIX: if a.data is a proper prefix of b.data the sets are NOT
			// equal.  Returning 0 here made the comparator asymmetric
			// (compare(a,b) != -compare(b,a)) and could drop distinct itemsets
			// from the TreeSet of results.
			if (biter.hasNext()) {
				return 1;  // b has more elements than a, so b sorts first
			}

			return 0;  // identical itemsets; a TreeSet will not add a duplicate
		}
		// No need to override equals.
	}
	
	// Orders ConfItemType rules for a TreeSet: descending by confidence, then
	// descending by support, then lexicographically by the rule words, so that
	// two DISTINCT rules are never reported as equal (a TreeSet drops duplicates).
	class ConfItemTypeConfidenceComparator implements Comparator<ConfItemType> {
		public int compare(ConfItemType a, ConfItemType b) {
			// Empty/null rules sort before everything else.
			boolean aEmpty = (a == null) || (a.lhs == null) || (a.lhs.isEmpty());
			boolean bEmpty = (b == null) || (b.lhs == null) || (b.lhs.isEmpty());
			if (aEmpty && bEmpty) {
				return 0;
			} else if (aEmpty) {
				return -1;
			} else if (bEmpty) {
				return 1;
			}

			// Higher confidence sorts first (descending order).
			if (a.confidence < b.confidence) {
				return 1;
			} else if (a.confidence > b.confidence) {
				return -1;
			}

			// Tie-break on support, also descending.
			if (a.support < b.support) {
				return 1;
			} else if (a.support > b.support) {
				return -1;
			}

			// Both equal: compare the sorted lhs words element by element.
			Iterator<String> biter = b.lhs.iterator();
			for (String s1 : a.lhs) {
				if (!biter.hasNext()) {
					return -1;  // a has more lhs elements than b, so a sorts first
				}
				int compareval = s1.compareTo(biter.next());
				if (compareval != 0) {
					return compareval;
				}
			}
			// BUG FIX: if a.lhs is a proper prefix of b.lhs the rules are NOT
			// equal; falling straight through to the rhs comparison made the
			// comparator asymmetric and could drop distinct rules from a TreeSet.
			if (biter.hasNext()) {
				return 1;  // b has more lhs elements than a, so b sorts first
			}

			// Identical lhs: fall back to the single rhs item.
			return a.rhs.compareTo(b.rhs);
		}
		// No need to override equals.
	}
	
	// Container for an itemset: the sorted set of item words, the fraction of
	// data rows containing the whole set (support), and the raw row count.
	class ItemType implements Comparable<Object> {
		
		TreeSet<String> data;   // the items, kept sorted so comparisons are deterministic
		float  support;         // fraction of rows containing every item in data
		int count;              // raw number of rows containing every item in data
		
		ItemType() {
			data = new TreeSet<String>();
			support = 0f;
			count = 0;
		}		
		ItemType(String d) {
			data = new TreeSet<String>();
			data.add(d);
			support = 0f;
			count = 0;
		}
		// copy constructor for the item words (support/count start at 0)
		ItemType(TreeSet<String> d) {
			data = new TreeSet<String>(d);
			support = 0f;
			count = 0;
		}
		// nice format for printing results, e.g. "[ink, pen], 75.0%"
		@Override public String toString() {
			// String.join replaces the old manual first/rest concatenation loop
			return "[" + String.join(", ", this.data) + "], " + this.support*100 + "%";
		}
		
		// Orders itemsets lexicographically by their sorted item words.
		// Support is deliberately ignored here; the candidate TreeSets rely on
		// this ordering to treat two ItemTypes with the same words as duplicates.
		public int compareTo(Object b) throws ClassCastException {
			if (!(b instanceof ItemType))
				throw new ClassCastException("An ItemType object expected");
			ItemType bitem = (ItemType)b;
			boolean bEmpty = (bitem.data == null) || (bitem.data.isEmpty());
			if (this.data.isEmpty() && bEmpty) {
				return 0;
			} else if (this.data.isEmpty()) {
				return -1;
			} else if (bEmpty) {
				return 1;
			}
			Iterator<String> biter = bitem.data.iterator();
			for (String s1 : this.data) {
				if (!biter.hasNext()) {
					return -1;  // this has more elements than b so this sorts first
				}
				int compareval = s1.compareTo(biter.next());
				if (compareval != 0) {
					return compareval;
				}
			}
			// BUG FIX: previously a proper prefix compared as equal (0), which
			// is asymmetric and could silently drop distinct itemsets from a TreeSet
			if (biter.hasNext()) {
				return 1;  // b has more elements than this, so b sorts first
			}

			return 0;  // the items are equal; a TreeSet will not add a duplicate
		}
	}
	
	// Container for one association rule lhs => rhs together with its support
	// and confidence values.
	class ConfItemType implements Comparable<Object> {
		
		TreeSet<String> lhs;   // left-hand side items, kept sorted
		String rhs;            // the single right-hand side item
		float  support;        // support of (lhs U rhs)
		float  confidence;     // support(lhs U rhs) / support(lhs)
		
		ConfItemType() {
			lhs = new TreeSet<String>();
			support = 0f;
			confidence = 0f;
		}		
		ConfItemType(String d) {
			lhs = new TreeSet<String>();
			lhs.add(d);
			support = 0f;
			confidence = 0f;
		}
		// copy constructor for the lhs items (support/confidence start at 0)
		ConfItemType(TreeSet<String> d) {
			lhs = new TreeSet<String>(d);
			support = 0f;
			confidence = 0f;
		}
		// nice format, e.g. "[ink, pen] => [diary] (Conf: 66.7%, Supp: 50.0%)"
		@Override public String toString() {
			// String.join replaces the old manual first/rest concatenation loop
			return "[" + String.join(", ", this.lhs) + "] => [" + this.rhs
					+ "] (Conf: " + this.confidence*100 + "%, Supp: " + this.support*100 + "%)";
		}
		
		// Orders rules lexicographically by lhs words, then by rhs.
		// Support/confidence are deliberately ignored here; see
		// ConfItemTypeConfidenceComparator for the ranked ordering.
		public int compareTo(Object b) throws ClassCastException {
			if (!(b instanceof ConfItemType))
				throw new ClassCastException("An ConfItemType object expected");
			ConfItemType bitem = (ConfItemType)b;
			boolean bEmpty = (bitem.lhs == null) || (bitem.lhs.isEmpty());
			if (this.lhs.isEmpty() && bEmpty) {
				return 0;
			} else if (this.lhs.isEmpty()) {
				return -1;
			} else if (bEmpty) {
				return 1;
			}
			Iterator<String> biter = bitem.lhs.iterator();
			for (String s1 : this.lhs) {
				if (!biter.hasNext()) {
					return -1;  // this has more elements than b so this sorts first
				}
				int compareval = s1.compareTo(biter.next());
				if (compareval != 0) {
					return compareval;
				}
			}
			// BUG FIX: previously a proper-prefix lhs fell through to the rhs
			// comparison, making the ordering asymmetric
			if (biter.hasNext()) {
				return 1;  // b has more lhs elements than this, so b sorts first
			}

			return this.rhs.compareTo(bitem.rhs);  // lhs equal, so order by rhs
		}
	}
	
	// Accepts the command line args:  csv file, support threshold, confidence threshold
	// Performs all the operations for this project
	//    1.  Read in a hierarchy file
	//    2.  Read in a csv file
	//    3.  Find the large itemsets that surpass the support threshold.  include ancestors
	//    4.  Find association rules that meet the confidence threshold
	//    5.  Output all large itemsets and association rules to output.txt and stdout
	public DataMine(String[] args) {
		
		initializeLog();
		String tolog;
		
		if (!processArguments(args)) {
			System.exit(1);
		}
		parents_of_item = new HashMap<String, LinkedList<String>>();
		children_of_category = new HashMap<String, HashSet<String>>();
		
		// build parents_of_item / children_of_category from the hierarchy file
		getHierarchy("hierarchy.txt");
		
		// read the raw csv data; each row becomes a set of items (plus ancestors)
		LinkedList<HashSet<String>> rawdata = readDataFromFile(filename);
		
		System.out.println("Computing large sets");
		computeLargeSets(rawdata);  // populates large_itemsets and all_supports
		
		tolog = "\n==Large itemsets (min_sup=" + min_sup*100 + "%)";
		log(tolog + "\n");
		System.out.println(tolog);
		logDataset(large_itemsets);
		
		tolog = "\n==High-confidence association rules (min_conf=" + min_conf*100 + "%)";
		log(tolog + "\n");
		System.out.println(tolog);
		computeConfidence();		// uses large_itemsets and all_supports
		logConfDataset(assoc_rules);
		
		closeLog();
	}
	
	// a utility method used during development only.
	// Reformats a datafile by picking out only a select few "columns" (1, 8, 13)
	// from each row and writing them to reformated_data.csv.
	// NOTE(review): each row is a HashSet, whose iteration order is unspecified,
	// so the positions counted here are NOT the original csv column order —
	// confirm this was intended before relying on the output.
	private void outputReformattedDataFile(LinkedList<HashSet<String>> data) {
		// try-with-resources guarantees the writer is closed even if a write fails
		// (the old explicit out.close() was skipped whenever an IOException was thrown)
		try (BufferedWriter out = new BufferedWriter(new FileWriter("reformated_data.csv", false))) {

			// header for the mod_business_db.csv is:
			//Vendor Formal Name,Vendor DBA,Contact Name,Telephone,Fax,Email,Certification,Ethnicity,Address Line 1,Address Line 2,City,State,Zip,MailingAddres
			for (HashSet<String> row : data) {
				int item_num = 1;
				boolean first = true;
				for (String item : row) {
					if ((item_num == 1) || (item_num == 8) || (item_num == 13)) {
						// quote fields that contain commas so the csv stays parseable
						String str;
						if (item.contains(",")) {
							str = "\"" + item + "\"";
						} else {
							str = item;
						}
						if (first) {
							first = false;
						} else {
							str = "," + str;
						}
						out.write(str);
					}
					item_num++;
				}
				out.write("\n");
			}
		} catch (IOException e) {
			System.out.println("Error opening reformated_data.csv file");
			System.exit(1);
		}
	}
	
	// compute the association rules and their confidence from the large itemsets:
	//     confidence(lhs => rhs) = support(lhs U rhs) / support(lhs)
	// input:  large_itemsets, all_supports, and children_of_category (needed by
	//         the uninterestingRule() method)
	// output: assoc_rules
	private void computeConfidence() {
		assoc_rules = new TreeSet<ConfItemType>(new ConfItemTypeConfidenceComparator());
		
		for (ItemType itemset : large_itemsets) {
			// ignore itemsets of less than 2 items because these cannot form an association rule
			if (itemset.data.size() < 2) {
				continue;
			}
			// numerator of confidence is support(LHS U RHS); it was already
			// computed during computeLargeSets(), so just look it up
			Float lhs_u_rhs_support = all_supports.get(itemset.data);
			// guard added for consistency with the lhs_support lookup below;
			// previously a missing entry caused an unexplained NullPointerException
			if (lhs_u_rhs_support == null) {
				System.out.println("Could not find support for " + itemset.data);
				System.exit(1);
			}
			
			// only one item is allowed on the RHS, so try every item of the
			// itemset as the RHS and use the remaining items as the LHS
			for (String onerhs : itemset.data) {
				TreeSet<String> lhs = new TreeSet<String>(itemset.data);
				lhs.remove(onerhs);
				// skip "uninteresting" rules of the form lhs => ancestor(lhs),
				// which hold trivially for every item in lhs
				if (uninterestingRule(lhs, onerhs)) {
					continue;
				}
				// denominator of confidence is support(LHS)
				Float lhs_support = all_supports.get(lhs);
				if (lhs_support == null) {
					System.out.println("Could not find lhs_support for " + lhs);
					System.exit(1);
				}
				float confidence = lhs_u_rhs_support/lhs_support;
				// note the strict > : rules exactly at min_conf are excluded
				if (confidence > min_conf) {
					ConfItemType it = new ConfItemType();
					it.lhs = lhs;
					it.rhs = onerhs;
					it.support = lhs_u_rhs_support;
					it.confidence = confidence;
					assoc_rules.add(it);
				}
			}
		}
	}
	
	// A rule is "uninteresting" when rhs is an ancestor of some item on the lhs,
	// because item => ancestor(item) always holds by construction.
	// input:  children_of_category
	private boolean uninterestingRule(TreeSet<String> lhs, String rhs) {
		HashSet<String> children = children_of_category.get(rhs);
		if (children == null) {
			return false;  // rhs is not a category, so it cannot be anyone's ancestor
		}
		for (String candidate : lhs) {
			if (children.contains(candidate)) {
				return true;  // rhs is an ancestor of this lhs item
			}
		}
		return false;
	}
	
	// Use the A PRIORI algorithm to compute large item sets.  A large itemset is one in which
	// the "support" for the items in the set is above a threshold.  "support" is the % of rows
	// in a table that contain all of the items in the itemset.  The A PRIORI algorithm does this
	// efficiently by first computing the support for itemsets of just 1 item.  Then all the
	// itemsets that are below threshold are ignored.  The ones that are above threshold are then
	// combined into every possible combination, and the supports for those 2 item sets are 
	// computed.  This process repeats again and again until there are no itemsets that meet
	// threshold.  This is efficient because we do not bother to compute support for itemsets
	// that we know "a priori" will not meet threshold.
	// Input is a structure of the raw data from a csv file.
	// Output is the global variable large_itemsets, plus all_supports which is used for
	// computing confidence later on (see computeConfidence()).
	private void computeLargeSets(LinkedList<HashSet<String>> data) {

		TreeSet<ItemType> C = new TreeSet<ItemType>();  // current candidate itemsets
		HashSet<String> l1 = new HashSet<String>();     // the large 1-itemsets, as bare strings
		ArrayList<TreeSet<ItemType>> L = new ArrayList<TreeSet<ItemType>>();  // large itemsets, one TreeSet per k
		all_supports = new HashMap<TreeSet<String>, Float>();
		
		// create a candidate set C
		// first iteration, add all items in data to C as singleton itemsets
		// (C is a TreeSet using ItemType.compareTo, so duplicates are discarded)
		for (HashSet<String> row : data) {
			for (String item : row) {
					ItemType newitem = new ItemType(item);
					C.add(newitem);					
			}
		}
		int items_in_c = C.size();
		int totalrows = data.size();
		System.out.println("Total raw data rows = " + totalrows);

		int iter = 0;
		boolean firstpass = true;
		// loop through the candidate large item sets in C
		//     compute the support for each set
		//     if the support meets threshold, then add it to a large itemset structure lk
		//     compute a new candidate set C with all combinations of sets that are above thresh
		//     repeat until C comes up empty
		do {
			iter++;
			System.out.println("\nIteration " + iter);
			TreeSet<ItemType> lk = new TreeSet<ItemType>();
			System.out.print("Computing frequencies for " + C.size() + " candidates");
			items_in_c = 0;
			// loop through C and compute frequencies
			for (ItemType item : C) {	// for each candidate itemset in C
				int count = 0;
				for (HashSet<String> datarow : data) {  // for each row in data
					boolean allfound = true;
					for (String s : item.data) {	// for each item in the candidate
						if (!datarow.contains(s)) {
							allfound = false;
							break;
						}
					}
					// if all items in this itemset were found, increase the count
					if (allfound) {
						count++;
					}
				}
				// now use the count to compute the support
				item.support = (float)count/(float)totalrows;
				item.count = count;
				// add the support to all_supports which is helpful later when computing confidence
				// NOTE(review): new Float(...) is deprecated in modern Java;
				// Float.valueOf() would be the non-deprecated equivalent
				all_supports.put(item.data, new Float(item.support));
				// note the strict > : itemsets exactly at min_sup are excluded
				if (item.support > min_sup) {
					// it meets threshold so add it to the lk set of large itemsets for this k
					lk.add(item);
					if (firstpass) {
						// for k=1 (itemset of size 1) save these itemsets in a special
						// structure because it is helpful when creating the next iteration of C
						// which is done below
						l1.add(item.data.first());
					}
				}
				items_in_c++;
				// progress indicator for large candidate sets
				if (items_in_c % 10000 == 0 ) {
					System.out.print(" " + items_in_c);
				}
			}
			System.out.println("\nComputed frequencies for " + items_in_c + " items");
			// debugging aid: dump this iteration's candidates and counts to a file
			printDatasetToFile(C, "iteration-" + iter + ".txt");

			
			firstpass = false;
			if (lk.isEmpty()) {
				break;  // no large k-itemsets, so no (k+1)-candidates are possible
			}
			L.add(lk);
			
			
			C = new TreeSet<ItemType>();
	
			System.out.println("Finding candidates for iteration " + (iter+1));
			// compute a new C by extending each large k-itemset in lk with every
			// large 1-item from l1 that it does not already contain
			items_in_c = 0;
			for (ItemType lkminus1item : lk) {
				for (String l1item : l1) {
					if (!lkminus1item.data.contains(l1item)) {
						ItemType newitem = new ItemType(lkminus1item.data);
						newitem.data.add(l1item);
						// duplicates (same words, different build order) collapse in the TreeSet
						C.add(newitem);
					}
				}
			}
			System.out.println("Found " + C.size());
		} while (!C.isEmpty());
		
		large_itemsets = new TreeSet<ItemType>(new ItemTypeSupportComparator());
		// combine all large itemsets in the large_itemsets which will automatically sort them
		// by their support value
		for (TreeSet<ItemType> itemset : L) {
			for (ItemType item : itemset) {
				large_itemsets.add(item);
			}
		}
	
	}
	
	// Dump every association rule in C to stdout, one per line.
	private void printConfDataset(TreeSet<ConfItemType> C) {
		Iterator<ConfItemType> rules = C.iterator();
		while (rules.hasNext()) {
			System.out.println(rules.next());
		}
	}
	
	// Write every association rule in C, numbered from 1, to both stdout and
	// the output.txt results log.
	private void logConfDataset(TreeSet<ConfItemType> C) {
		int rank = 0;
		for (ConfItemType rule : C) {
			rank++;
			String numbered = rank + ": " + rule;
			System.out.println(numbered);
			log(numbered + "\n");
		}
	}
	
	// Write every itemset in C (with its raw row count) to the given file,
	// overwriting any previous contents.  Exits the program on I/O failure.
	private void printDatasetToFile(TreeSet<ItemType> C, String filename) {
		// try-with-resources closes the writer even if a write throws
		// (the old explicit out.close() was skipped on any IOException)
		try (BufferedWriter out = new BufferedWriter(new FileWriter(filename, false))) {
			for (ItemType item : C) {
				out.write(item + " count = " + item.count + "\n");
			}
		} catch (IOException e) {
			System.out.println("Error opening " + filename + " file");
			System.exit(1);
		}
	}
	
	// Dump every itemset in C to stdout, one per line.
	private void printDataset(TreeSet<ItemType> C) {
		Iterator<ItemType> itemsets = C.iterator();
		while (itemsets.hasNext()) {
			System.out.println(itemsets.next());
		}
	}
	
	// Write every itemset in C, numbered from 1, to both stdout and the
	// output.txt results log.
	private void logDataset(TreeSet<ItemType> C) {
		int rank = 0;
		for (ItemType entry : C) {
			rank++;
			String numbered = rank + ": " + entry;
			System.out.println(numbered);
			log(numbered + "\n");
		}
	}
	
	// populate parents_of_item and children_of_category with hierarchy from a txt file
	// A hierarchy file contains a hierarchy like so:
	//
	// 	parent 1
	//  	child 1
	//		child 2
	//		parent 2
	//			child 3
	//  parent 3
	//		child 4
	// 	...
	//
	// Nodes underneath a parent must be indented with a tab.
	// Lines starting with '#' are treated as comments and ignored.
	private void getHierarchy(String filename) {
		System.out.println("Reading  Hierarchy file " + filename);
		// slurp the whole file into memory at once
		// NOTE(review): a single read() is not guaranteed to fill the buffer;
		// a read loop (or DataInputStream.readFully) would be safer — confirm
		byte[] buffer = new byte[(int) new File(filename).length()];
	    BufferedInputStream f = null;
	    try {
	        f = new BufferedInputStream(new FileInputStream(filename));
	        f.read(buffer);      
	        
	    } catch (FileNotFoundException e) {
	    	System.out.println("Error, cannot open hierarchy file " + filename + "\n" + e);
	    	return;
	    } catch (Exception e) {
	    	System.out.println("Error reading hierarchy file " + filename + "\n" + e);
	    	return;
	    } finally {
	        if (f != null) try { f.close(); } catch (IOException ignored) { }
	    }
	    // ancestors holds the chain of parents leading to the current indent level
	    LinkedList<String> ancestors = new LinkedList<String>();
	    boolean reading_item = false;  // set to true when non-whitespace is found on a line
	    							   // and is reset to false at the end of the line
	    boolean ignoring_comment = false;
	    StringBuffer item = new StringBuffer();  // the item name accumulated so far
	    String last_item = "";
	    int level = 0;     // indent level of the previous item
	    int num_tabs = 0;  // indent level (tab count) of the current line
	    Set<String> categories = new HashSet<String>();  // every name seen as an ancestor
	    // character-by-character scan of the file contents
	    for (int i = 0; i < buffer.length; i++) {
	    	
	    	char c = (char)buffer[i];
	    	if (ignoring_comment) {
	    		// skip the remainder of a '#' comment line
	    		if (c == '\n') {
	    			ignoring_comment = false;
	    		}
	    	} else if (!reading_item) {
	    		// still in the leading whitespace of a line: count tabs to find depth
	    		if (c == '\t') {
	    			num_tabs++;
	    		} else if (c == '#') {
	    			ignoring_comment = true;
	    		} else if (!Character.isWhitespace(c)) {
	    			// first character of an item; fix up the ancestor chain for
	    			// the new indent level before accumulating the name
	    			reading_item = true;
	    			item.append(c);
	    			for (int j = ancestors.size(); j > num_tabs; j--) {
	    				ancestors.removeLast();
	    			}
	    			if (level < num_tabs) {
	    				// the previous item turned out to be a parent, not a leaf
	    				parents_of_item.remove(last_item);
	    			}
	    			level = num_tabs;
	    		}
	    	} else {
	    		if (c == '\r') {
	    			// ignore carriage return which appears in windows formatted csv files
	    		} else if (c == '\n') {
	    			// end of an item line: record its ancestor chain (top-level
	    			// items at level 0 are parents only, never leaf entries)
	    			if (level != 0) {
	    				parents_of_item.put(item.toString(), new LinkedList<String>(ancestors));
	    				for (String s : ancestors) {
	    					categories.add(s);
	    				}
	    			}
    				last_item = item.toString();
	    			ancestors.add(item.toString());
	    			// istr was used for (now disabled) debug printing, truncated to 30 chars
	    			String istr = item.toString();
	    			istr = istr.substring(0, Math.min(istr.length(), 30));
	    			num_tabs = 0;
	    			item.delete(0, item.length());
	    			reading_item = false;
	    		} else {
	    			item.append(c);
	    		}
	    	}
	    }
	    System.out.println("Found " + categories.size() + " categories and " + parents_of_item.size() + " leaf items");
	    // Fill in the children_of_category structure: for every category, collect
	    // all leaf items that list it anywhere in their ancestor chain
	    for (String cat : categories) {
	    	HashSet<String> children = new HashSet<String>();
			for (String k : parents_of_item.keySet()) {
				for (String ancest : parents_of_item.get(k)) {
					if (cat.equals(ancest)) {
						children.add(k);
					}
				}
			}
			if (children.size() > 0) {
				children_of_category.put(cat, children);
			}
	    }
	    
	}
	
	// Debug helper: print each category followed by its children (one per
	// tab-indented line), then the number of leaf items in the hierarchy.
	private void print_children_of_category() {
		for (String category : children_of_category.keySet()) {
			System.out.println(category);
			HashSet<String> kids = children_of_category.get(category);
			for (String child : kids) {
				System.out.println("\t" + child);
			}
		}
		System.out.println(parents_of_item.size());

	}
	
	// Debug helper: print each leaf item (truncated to 20 chars) followed by
	// all of its ancestors, then the total number of leaf items.
	private void print_parents_of_item() {
		for (String leaf : parents_of_item.keySet()) {
			StringBuilder line = new StringBuilder();
			line.append(leaf.substring(0, Math.min(leaf.length(), 20))).append("  :");
			for (String ancestor : parents_of_item.get(leaf)) {
				line.append("  <").append(ancestor).append(">");
			}
			System.out.println(line);
		}
		System.out.println(parents_of_item.size());

	}
	
	// Add an item to a transaction row, along with every hierarchy ancestor of
	// the item, so that ancestors are counted toward support as well.
	private void addItemToSet(HashSet<String> row, String item) {
		row.add(item);
		LinkedList<String> ancestors = parents_of_item.get(item);
		if (ancestors == null) {
			return;  // item has no entry in the hierarchy
		}
		row.addAll(ancestors);
	}
	
	// Reads a csv file into a list of rows, where each row is the set of items
	// on that csv line (plus, via addItemToSet(), every hierarchy ancestor of
	// each item).  Quoted fields may contain commas; \r characters are ignored.
	// Returns null when the file cannot be read.
	// When the system property "reformat_data" is "true", also writes selected
	// columns out to reformated_data.csv and exits (development aid only).
	private LinkedList<HashSet<String>> readDataFromFile(String filename) {
		
		System.out.println("Reading file");
		// slurp the whole file into memory at once
		// NOTE(review): a single read() is not guaranteed to fill the buffer;
		// a read loop (or DataInputStream.readFully) would be safer — confirm
		byte[] buffer = new byte[(int) new File(filename).length()];
	    BufferedInputStream f = null;
	    try {
	        f = new BufferedInputStream(new FileInputStream(filename));
	        f.read(buffer);      
	        
	    } catch (FileNotFoundException e) {
	    	System.out.println("Error, cannot open file " + filename + "\n" + e);
	    	return null;
	    } catch (Exception e) {
	    	System.out.println("Error reading " + filename + "\n" + e);
	    	return null;
	    } finally {
	        if (f != null) try { f.close(); } catch (IOException ignored) { }
	    }
	    LinkedList<HashSet<String>> data = new LinkedList<HashSet<String>>();
		try {
			// NOTE(review): "out" is never closed if an exception is thrown before
			// the normal close at the bottom; try-with-resources would be safer
			BufferedWriter out = null;
			String reformat_data = System.getProperty("reformat_data");
			boolean output_reformatted_data;
			if (reformat_data == null) {
				output_reformatted_data = false;
			} else {
				output_reformatted_data = reformat_data.toLowerCase().equals("true");
			}
		    if (output_reformatted_data) {
					out = new BufferedWriter(new FileWriter("reformated_data.csv", false));
		    }
		    System.out.println("Parsing file");
		    HashSet<String> row = new HashSet<String>();
		    boolean inquotes = false;                // currently inside a double-quoted field
		    StringBuffer item = new StringBuffer();  // the current field accumulated so far
		    int items_this_row = 0;
		    int items_first_row = 0;  // field count of row 1, used to validate later rows
		    boolean first_row = true;
		    boolean first_item_to_reformat = true;
		    int line = 1;
		    // character-by-character scan of the csv contents
		    for (int i = 0; i < buffer.length; i++) {
		    	
		    	char c = (char)buffer[i];
		    	if (inquotes) {
		    		if (c == '"') {
		    			inquotes = false;
		    		} else {
		    			item.append(c);  // commas inside quotes are field data
		    		}
		    	} else {
		    		if (c == '"') {
		    			inquotes = true;
		    		} else if (c == ',') {
		    			// end of a field: add it (and its hierarchy ancestors) to the row
		    			addItemToSet(row, item.toString());
		    			if (output_reformatted_data) {
		    				// header for the mod_business_db.csv is:
		    				//Vendor Formal Name,Vendor DBA,Contact Name,Telephone,Fax,Email,Certification,Ethnicity,Address Line 1,Address Line 2,City,State,Zip,MailingAddres
							// keep only columns 0, 7, 10, 12 (name, ethnicity, city, zip)
							if ((items_this_row == 0) || (items_this_row == 7) 
									|| (items_this_row == 10) || (items_this_row == 12)) {
								String str;
								if (item.toString().contains(",")) {
									str = "\"" + item.toString().trim() + "\"";
								} else {
									str = item.toString().trim();
								}
								if (items_this_row == 12) { // remove ? from zipcodes
									str = str.replaceAll("[^\\d-]", "");
								}
								if (first_item_to_reformat) {
									first_item_to_reformat = false;
								} else {
									str = "," + str;
								}
								out.write(str);
							}
		    			}
		    			item = new StringBuffer();
		    			items_this_row++;
		    		} else if (c == '\r') {
		    			// ignore carriage return which appears in windows formatted csv files
		    		} else if (c == '\n') {
		    			// end of a row: finish the last field and start a new row
		    			addItemToSet(row, item.toString());
		    			data.add(row);
		    			if (output_reformatted_data) {
		    				out.write("\n");
		    			}
		    			first_item_to_reformat = true;
		    			item = new StringBuffer();
		    			row = new HashSet<String>();
		    			if (first_row) {
		    				items_first_row = items_this_row;
		    				first_row = false;
		    			}
		    			if (items_this_row == 0) {
		    				// just skip this row, allow blank rows in datafile
		    			} else if (items_this_row != items_first_row) {
		    				// warn (but keep going) when a row's field count differs from row 1
		    				System.out.println("Error parsing file " + filename + " on line " + line);
		    				System.out.println("There are " + items_this_row 
		    						+ " items on this line, expecting " + items_first_row);
		    			}
		    			items_this_row = 0;
		    			line++;
		    			// progress indicator for large files
		    			if ((line % 4000) == 0) {
		    				System.out.println(line + " lines read");
		    			}
		    		} else {
		    			item.append(c);
		    		}
		    	}
		    }
		    if (output_reformatted_data) {
		    	out.close();
		    	System.exit(0);  // reformatting mode never proceeds to mining
		    }
		} catch (IOException e) {
			System.out.println("Error opening reformated_data.csv file");
			System.exit(1);
		}
		return data;
	}
	
	// Used only for testing during development.
	// Builds the small example dataset from the default project assignment.
	private LinkedList<HashSet<String>> readTestData() {
		
		String[][] arraydata = {
				{"pen", "ink", "diary", "soap"},
				{"pen", "ink", "diary"},
				{"pen", "diary"},
				{"pen", "ink", "soap"}};
		LinkedList<HashSet<String>> data = new LinkedList<HashSet<String>>();
		for (String[] transaction : arraydata) {
			HashSet<String> row = new HashSet<String>();
			for (String item : transaction) {
				row.add(item);
			}
			data.add(row);
		}
		return data;
	}
	
	// Validate and store the command line arguments:
	//   args[0] = csv dataset filename
	//   args[1] = min_sup, a real number in [0, 1]
	//   args[2] = min_conf, a real number in [0, 1]
	// Returns false (after printing the error and usage) when they are unusable.
	private boolean processArguments(String[] args) {
		
		//ensuring proper input arguments
		if (args.length < 3) {
			printUsage();
			return false;
		}
		filename = args[0];

		min_sup = parseThreshold(args[1], 2);
		if (min_sup == null) {
			return false;
		}

		min_conf = parseThreshold(args[2], 3);
		if (min_conf == null) {
			return false;
		}
		return true;
	}
	
	// Parse one threshold argument; returns null (after printing the error and
	// usage) unless the value is a real number between 0 and 1.
	// Shared by both thresholds to avoid the previous duplicated try/catch blocks.
	private Float parseThreshold(String arg, int argnum) {
		try {
			// Float.parseFloat replaces the deprecated new Float(String) constructor
			float value = Float.parseFloat(arg);
			if ((value >= 0.0f) && (value <= 1.0f)) {
				return value;
			}
		} catch (NumberFormatException e) {
			// fall through to the shared error message below
		}
		System.out.println("Error invalid precision number for argument " + argnum
				+ ", expect real number between 0 and 1");
		printUsage();
		return null;
	}
	
	// This prepares the output file for the results: opens output.txt,
	// truncating any previous run.  The program cannot continue without its
	// output file, so failure here is fatal.
	private void initializeLog() {
		try {
			FileWriter writer = new FileWriter("output.txt", false);
			logbuffer = new BufferedWriter(writer);
		} catch (IOException e) {
			System.out.println("Error opening output.txt file");
			System.out.println(e);
			System.exit(1);
		}
	}
	
	// Flush and close the output.txt results log; failure to close is fatal.
	private void closeLog() {
		try {
			logbuffer.close();
		} catch (IOException e) {
			System.out.println("Error closing output.txt file");
			System.out.println(e);
			System.exit(1);
		}
	}
	
	// Append a string to the output.txt results log; an I/O failure is fatal.
	private void log(String s) {
		try {
			logbuffer.write(s);
		} catch (IOException e) {
			System.out.println("Error writing to output.txt file");
			System.out.println(e);
			System.exit(1);
		}
	}

	// Print the expected command line form.
	private void printUsage() {
		System.out.println("Usage:  DataMine <dataset file> <min_sup> <min_conf>");
	}

	public static void main(String[] args) {
		// Just create a DataMine object, the constructor does everything:
		// it reads the hierarchy and csv files, mines the large itemsets and
		// association rules, and writes the results to output.txt and stdout.
		new DataMine(args);
	}
	
}
