package demo;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;

/*
 * Sequence of Events
 * 
 * Map:
 * - (starting from the 2nd run) read in qualified_candidates_x and compare it to /user/hduser/hadoopdata/"original_data"
 * - pass a new set of potential_candidates on to Reduce for counting
 * 
 * Reduce:
 * - count up potential_candidates and spit out as /user/hduser/hadoopdata-output0/part-00000
 * 
 * generateCandidates
 * - read in potential_candidates_x and create new qualified_candidates_x+1
 * - output /user/hduser/qualified_candidates/qualified_candidates_x+1
 * 
 * 
 */

/*
 * Tasks:
 * Test 1 - Counters tested, working 2/22
 * Test 2 - Map's if/else/else & reading counters, & Reduce to eliminate min support, working 2/22
 * Test 3 - generateCandidate can read in first Reduce output, and generateCandidate list for next Map, working 2/22
 * Test 4 - tested on hadoopmini (360 transaction), working 2/23
 * Test 5 - tested on 100K file, seems to skip iteration 2 of 3, then caught up again on iteration 3
 * 			maybe a race issue with the read-files not ready and map already proceeded?
 * 			TODO change all local hdfs calls to distributed cache
 */
public class AprioriDemo {
	
	public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();
		private Path[] localFiles;
		// Candidate itemsets, loaded once per task in configure(). The original
		// re-opened (and never closed) the cache file on every map() call, and
		// its readLine() inside the if-condition consumed the first line, so
		// the first candidate in the file was never matched.
		private final List<String> candidates = new ArrayList<String>();

		/**
		 * Fetches the DistributedCache candidate file and loads every line into
		 * memory. An empty (or absent) file leaves the list empty, which marks
		 * the first pass (count individual items).
		 */
		public void configure(JobConf job) {
			BufferedReader br = null;
			try {
				localFiles = DistributedCache.getLocalCacheFiles(job);
				if (localFiles != null && localFiles.length > 0) {
					br = new BufferedReader(new FileReader(localFiles[0].toString()));
					String line = br.readLine();
					while (line != null) {
						candidates.add(line);
						line = br.readLine();
					}
				}
			} catch (IOException e) {
				e.printStackTrace();
			} finally {
				if (br != null) {
					try {
						br.close();
					} catch (IOException e) {
						e.printStackTrace();
					}
				}
			}
		}

		/**
		 * First pass (no candidates yet): emit every item of the transaction
		 * with a count of 1. Later passes: emit each candidate itemset that is
		 * fully contained in the transaction, for support counting in Reduce.
		 */
		public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
			String transaction = value.toString();

			if (candidates.isEmpty()) { // first run: count individual items
				StringTokenizer tokenizer = new StringTokenizer(transaction);
				while (tokenizer.hasMoreTokens()) {
					word.set(tokenizer.nextToken());
					output.collect(word, one);
				}
			} else { // later runs: match each candidate against this transaction
				String[] transactionItems = transaction.split("\\s+");

				for (String candidate : candidates) {
					String[] candidateItems = candidate.split("\\s+");
					int match = 0;

					// count how many candidate items occur in the transaction
					for (int i = 0; i < candidateItems.length; i++) {
						for (int j = 0; j < transactionItems.length; j++) {
							if (candidateItems[i].equals(transactionItems[j])) {
								match++;
							}
						}
					}

					// candidate fully contained -> emit it for min-support check
					if (match == candidateItems.length) {
						word.set(candidate);
						output.collect(word, one);
					}
				}
			}
		}
	}

	public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
		/** Default minimum support: 1% of the 100K transactions in T10I4D100K.txt. */
		private static final int DEFAULT_MIN_SUPPORT = 1000;

		// Effective threshold for this job; see configure().
		private int minSupport = DEFAULT_MIN_SUPPORT;

		/**
		 * Reads the minimum-support threshold from the job configuration key
		 * "apriori.min.support". The previous hard-coded 1000 remains the
		 * default, so existing jobs behave identically.
		 */
		public void configure(JobConf job) {
			minSupport = job.getInt("apriori.min.support", DEFAULT_MIN_SUPPORT);
		}

		/**
		 * Sums the occurrence counts of one candidate itemset and emits the
		 * itemset when it meets minimum support. The value is emitted as null
		 * on purpose: TextOutputFormat then writes only the key, which is the
		 * format generateCandidates() expects to read back.
		 */
		public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
			int sum = 0;
			while (values.hasNext()) {
				sum += values.next().get();
			}
			if (sum >= minSupport) {
				// use new IntWritable(sum) instead of null if count output is desired
				output.collect(key, null);
			}
		}
	}

	/**
	 * Reads the Reduce output of pass {@code counter}
	 * (hadoopdata-output&lt;counter&gt;/part-00000) and writes the next round's
	 * candidate itemsets to
	 * /user/hduser/candidates-data/current-candidates-list-(counter+1).txt.
	 *
	 * counter == 0: joins all frequent single items into 2-item candidates.
	 * counter  > 0: joins every pair of frequent k-itemsets that share k-1
	 * items into one (k+1)-item candidate.
	 *
	 * Fixes versus the original implementation:
	 * - exceptions are no longer silently swallowed (the empty catch could hide
	 *   "Filesystem closed" errors — a plausible cause of the skipped-iteration
	 *   behavior noted in the file header);
	 * - the shared FileSystem instance returned by FileSystem.get() is no
	 *   longer close()d: FileSystem.get() caches instances, so closing one
	 *   closes it for every later caller in the JVM;
	 * - each candidate is written exactly once (the original wrote most
	 *   combinable pairs twice — once per direction — and a stray j++ skipped
	 *   the pair (i, i+1) entirely), so support counts in the next Map pass are
	 *   no longer inflated by duplicate candidate lines.
	 *
	 * The part-clone snapshot file produced by the original is still written,
	 * in case anything external inspects it.
	 */
	public static void generateCandidates(long counter) {
		String candPath = "hdfs://localhost:54310/user/hduser/candidates-data/current-candidates-list-" + (counter + 1) + ".txt";
		String reducePath = "hdfs://localhost:54310/user/hduser/hadoopdata-output" + counter + "/part-00000";
		String clonePath = "hdfs://localhost:54310/user/hduser/hadoopdata-output" + counter + "/part-clone";

		try {
			// Shared, cached instance — intentionally never closed here.
			FileSystem fs = FileSystem.get(new Configuration());

			// Load the freshly produced Reduce output into memory once.
			List<String> frequent = readLines(fs, new Path(reducePath));

			// Preserve the original side effect: a line-by-line snapshot of
			// the Reduce output next to it.
			writeLines(fs, new Path(clonePath), frequent);

			// Build the next candidate set (already deduplicated).
			Set<String> nextCandidates;
			if (counter == 0) {
				nextCandidates = combineSingletons(frequent);
			} else {
				nextCandidates = combineItemsets(frequent);
			}

			// Overwrite the candidate file for the next Map pass.
			Path outputFilePath = new Path(candPath);
			if (fs.exists(outputFilePath)) {
				fs.delete(outputFilePath, true);
			}
			writeLines(fs, outputFilePath, new ArrayList<String>(nextCandidates));
		} catch (Exception e) {
			// Do not swallow errors: a silent failure here starves the next
			// Map pass of candidates and the iteration silently degenerates.
			e.printStackTrace();
		}
	}

	/** Reads every line of an HDFS file into a list; always closes the reader. */
	private static List<String> readLines(FileSystem fs, Path path) throws IOException {
		List<String> lines = new ArrayList<String>();
		BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)));
		try {
			String line = br.readLine();
			while (line != null) {
				lines.add(line);
				line = br.readLine();
			}
		} finally {
			br.close();
		}
		return lines;
	}

	/** Writes the given lines to an HDFS file, one per line, overwriting it. */
	private static void writeLines(FileSystem fs, Path path, List<String> lines) throws IOException {
		BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(path)));
		try {
			for (String line : lines) {
				bw.write(line);
				bw.write("\n");
			}
		} finally {
			bw.close();
		}
	}

	/** Pass 1: pair up frequent single items into "a b" 2-item candidates. */
	private static Set<String> combineSingletons(List<String> items) {
		Set<String> result = new LinkedHashSet<String>();
		for (int i = 0; i < items.size(); i++) {
			for (int j = i + 1; j < items.size(); j++) {
				result.add(items.get(i) + " " + items.get(j));
			}
		}
		return result;
	}

	/**
	 * Pass k&gt;1: combines every pair of k-itemsets sharing k-1 items into a
	 * (k+1)-item candidate. Items inside a candidate are sorted (TreeSet), so
	 * the same combination yields the same string regardless of pair order and
	 * deduplicates naturally.
	 */
	private static Set<String> combineItemsets(List<String> itemsets) {
		Set<String> result = new TreeSet<String>();
		for (int i = 0; i < itemsets.size(); i++) {
			String[] a = itemsets.get(i).split("\\s+");
			for (int j = i + 1; j < itemsets.size(); j++) {
				String[] b = itemsets.get(j).split("\\s+");

				// Count how many items of b also occur in a.
				int match = 0;
				for (int x = 0; x < b.length; x++) {
					for (int y = 0; y < a.length; y++) {
						if (b[x].equals(a[y])) {
							match++;
						}
					}
				}

				// Combinable when the itemsets differ in exactly one item.
				if (match == a.length - 1) {
					Set<String> merged = new TreeSet<String>();
					merged.addAll(Arrays.asList(a));
					merged.addAll(Arrays.asList(b));
					// "[a, b, c]" -> "a b c", same format the original emitted
					result.add(merged.toString().replace("[", "").replace("]", "").replace(",", ""));
				}
			}
		}
		return result;
	}
	
	/**
	 * Drives the iterative Apriori passes.
	 *
	 * args[0] = input path (transaction data); args[1] = output path prefix
	 * (pass k writes its Reduce output to outputPath + k).
	 *
	 * Fix versus the original: the loop was {@code while (reducedCounter > 1)}
	 * with {@code reducedCounter} initialized to 1, so the body never executed
	 * and no job was ever submitted. A do/while runs the first pass
	 * unconditionally and keeps iterating while Reduce still emits more than
	 * one frequent itemset.
	 */
	public static void main(String[] args) throws Exception {
		long fileCounter = 0;
		long reducedCounter;
		String inputPath = args[0];
		String outputPath = args[1];

		// Start each run with a fresh, empty candidate file: an empty file
		// tells the first Map pass to count individual items.
		String candPath = "hdfs://localhost:54310/user/hduser/candidates-data/current-candidates-list-" + fileCounter + ".txt";
		Path path = new Path(candPath);
		FileSystem fs = FileSystem.get(new Configuration());
		if (fs.exists(path)) {
			fs.delete(path, true);
		}
		fs.createNewFile(path);

		do {
			JobConf conf = new JobConf(AprioriDemo.class);
			candPath = "hdfs://localhost:54310/user/hduser/candidates-data/current-candidates-list-" + fileCounter + ".txt";
			DistributedCache.addCacheFile(new Path(candPath).toUri(), conf);
			conf.setJobName("AprioriDemo");

			conf.setOutputKeyClass(Text.class);
			conf.setOutputValueClass(IntWritable.class);

			conf.setMapperClass(Map.class);
			conf.setReducerClass(Reduce.class);

			conf.setInputFormat(TextInputFormat.class);
			conf.setOutputFormat(TextOutputFormat.class);

			FileInputFormat.setInputPaths(conf, new Path(inputPath));
			FileOutputFormat.setOutputPath(conf, new Path(outputPath + fileCounter));

			// JobClient.runJob() blocks until the job completes, so no extra
			// waitForCompletion() call is needed afterwards.
			RunningJob job = JobClient.runJob(conf);

			reducedCounter = job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "REDUCE_OUTPUT_RECORDS").getValue();
			System.out.println("reducedCounter: " + reducedCounter);

			// More than one surviving itemset -> another pass can still
			// combine them into larger candidates.
			if (reducedCounter > 1) {
				System.out.println("fileCounter before generateCandidates: " + fileCounter);
				generateCandidates(fileCounter);
				fileCounter++; // increment after generateCandidates: it reads output<fileCounter>
				System.out.println("fileCounter after generateCandidates: " + fileCounter);
			}
		} while (reducedCounter > 1);
	}
}
