import java.io.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.ArrayList;
import java.net.URI;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.net.NetworkTopology;


//* setSplitSize1 (map)
//* setSplitSize2 (InputFormat)	
//	= 524288

//* setStart (fileSize-splitSize)		
//	= fileSize - splitSize
//	= 1048576 - 524288 byte
//	= 524288 byte
//--------------------------------	
//* setSplitSize1 (map)
//= 33554432

//* setSplitSize2 (InputFormat)	
//= 33554432

//* setStart (fileSize-splitSize)		
//= fileSize - splitSize
//= 1073741824 - 33554432 byte
//= 1040187392 byte
//--------------------------------	
//* setSplitSize1 (map)
//= 67108864

//* setSplitSize2 (InputFormat)	
//= 67108864

//* setStart (fileSize-splitSize)		
//= fileSize - splitSize
//= 1073741824 - 67108864 byte
//= 1006632960 byte

 
public class ds2 extends Configured implements Tool {
	private static final Log LOG = LogFactory.getLog(ds2.class);
//checked
	public static class Map extends MapReduceBase implements Mapper<IntWritable, BytesWritable, Text, Text> {
		// Brute-force substring search: scans every query line against the raw
		// bytes of this split and emits one record per match.
		//   input  key   : start offset of the split within the file
		//   input  value : split bytes (non-last splits carry a 99-byte look-ahead)
		//   output key   : "<fileName>,<query>"
		//   output value : absolute byte offset of the match, as decimal text
		private File query;              // query file, one pattern per line
		private Text word = new Text();  // reused output key buffer
		private Text one = new Text();   // reused output value buffer
		private String curFile;          // name of the file this split came from

		public void configure(JobConf job) {
			// NOTE(review): reads query.dat from the task working directory;
			// assumes the DistributedCache materializes it there -- confirm.
			query = new File("./query.dat");
		}

		public void map(IntWritable key, BytesWritable value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			curFile = ((FileSplit) reporter.getInputSplit()).getPath().getName();
			System.out.println("\nlength: "+value.getLength());

			// getBytes() exposes the writable's backing array (valid up to
			// getLength()); the previous extra new byte[] allocation was dead code.
			byte[] fByte = value.getBytes();

			BufferedReader qReader = new BufferedReader(new InputStreamReader(new FileInputStream(query)));
			try { // FIX: ensure the reader is closed even if matching throws
				String q;
				while ((q = qReader.readLine()) != null) {
					byte[] qByte = q.getBytes();
					// FIX: an empty query line used to leave the match flag from the
					// previous query untouched, emitting bogus matches at every offset.
					if (qByte.length == 0)
						continue;
//*********SET*********//
					// k bounds the scan: 99 bytes are reserved as look-ahead into the
					// next split; when the record has no look-ahead (length == 65536)
					// the scan runs up to the true end of the buffer.
					int k = 99;
					if (value.getLength() == 65536/*67108864*/)
						k = qByte.length;
					for (int i = 0; i <= (value.getLength() - k); i++) {
						boolean matched = true;
						for (int j = 0; j < qByte.length; j++) {
							if (fByte[i + j] != qByte[j]) {
								matched = false;
								break;
							}
						}
						if (matched) {
							word.set(curFile + "," + q);
							one.set(Integer.toString(key.get() + i));
							output.collect(word, one);
						}
					}
				}
			} finally {
				qReader.close();
			}
			System.err.println("map succeed");
		}
	}
	
    public static class AgePartitioner implements Partitioner<Text, Text> {
    	// Routes each match offset (the map output value) to a reducer by which
    	// fixed-size split of the input file the offset falls into.
    	JobConf conf;

        @Override
        public int getPartition(Text key, Text value, int numReduceTasks) {
//*********SET*********//
            // Must agree with the split size hard-coded in the InputFormat.
            final int splitsize = 65536;
            final int offset = Integer.parseInt(value.toString());
            // Guard the modulus when the job runs with zero reduce tasks.
            return numReduceTasks == 0 ? 0 : (offset / splitsize) % numReduceTasks;
        }

        @Override
        public void configure(JobConf arg0) {
            conf = arg0;
        }
    }
  //checked
	  public static class Reduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {
		/**
		 * Gathers every match offset for one "file,query" key, sorts them in
		 * ascending order, and emits them on a single line.
		 *
		 * FIX: offsets were previously concatenated with no delimiter (12 and 34
		 * became "1234"), making the output unparseable; they are now
		 * comma-separated, matching the earlier commented-out streaming version.
		 */
		public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			// ArrayList sorts in place without LinkedList's copy-to-array overhead.
			List<Integer> offsets = new ArrayList<Integer>();
			while (values.hasNext()) {
				offsets.add(Integer.parseInt(values.next().toString()));
			}
			Collections.sort(offsets);
			StringBuilder builder = new StringBuilder();
			for (int i = 0; i < offsets.size(); i++) {
				if (i > 0) {
					builder.append(',');
				}
				builder.append(offsets.get(i));
			}
			output.collect(key, new Text(builder.toString()));
		  	System.err.println("reduce succeed");
		}
	  }
  
  
  // checked
  // InputFormat that ignores the HDFS block size and carves each input file
  // into fixed-size splits (hard-coded in getSplits below).  Adapted from the
  // stock org.apache.hadoop.mapred.FileInputFormat.getSplits implementation.
  public static class newFileInputFormat extends FileInputFormat<IntWritable, BytesWritable> {
	private long minSplitSize = 1;

//**setSplitSize2
//	private int sSize = 67108864; //67108864   =34mb
								  //33554432   =32mb for1gbfile//computeSplitSize(goalSize, minSize, blockSize);
								  //   32768   =32kb for1mbfile
								  //   16384   =16kb
								  // 1048576   = 1mb
    // A tail shorter than SPLIT_SLOP * splitSize is merged into the last split.
    private static final double SPLIT_SLOP = 1.1;
    @Override
    public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    	FileStatus[] files = listStatus(job);
    	// Save the number of input files for metrics/loadgen
        job.setLong(NUM_INPUT_FILES, files.length);
        long totalSize = 0;                           // compute total size
        for (FileStatus file: files) {                // check we have valid files
            if (file.isDirectory()) {
                throw new IOException("Not a file: "+ file.getPath());
            }
            totalSize += file.getLen();
        }

        // NOTE(review): goalSize/minSize (and blockSize below) are computed but
        // never used -- the hard-coded splitSize overrides the usual
        // computeSplitSize(goalSize, minSize, blockSize) logic.
        long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
        long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
                FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);

        // generate splits
        ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
        NetworkTopology clusterMap = new NetworkTopology();
        for (FileStatus file: files) {
            Path path = file.getPath();
            long length = file.getLen();
            if (length != 0) {
                FileSystem fs = path.getFileSystem(job);
                BlockLocation[] blkLocations;
                if (file instanceof LocatedFileStatus) {
                    blkLocations = ((LocatedFileStatus) file).getBlockLocations();
                } else {
                    blkLocations = fs.getFileBlockLocations(file, 0, length);
                }
                if (isSplitable(fs, path)) {
                    long blockSize = file.getBlockSize();
//*********SET*********//                    
                    // Fixed split size; must match the constants hard-coded in
                    // the mapper, partitioner, and record reader.
                    long splitSize = 65536/*67108864*/;
                    long bytesRemaining = length;
                    while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
                        String[] splitHosts = getSplitHosts(blkLocations,
                                length-bytesRemaining, splitSize, clusterMap);
                        splits.add(makeSplit(path, length-bytesRemaining, splitSize,
                                splitHosts));
                        bytesRemaining -= splitSize;
                    }

                    // Whatever remains (up to SPLIT_SLOP * splitSize bytes)
                    // becomes the final split.
                    if (bytesRemaining != 0) {
                        String[] splitHosts = getSplitHosts(blkLocations, length
                                - bytesRemaining, bytesRemaining, clusterMap);
                        splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining,
                                splitHosts));
                    }
                } else {
                    // if not splitable, simply make split with the whole file length
                    String[] splitHosts = getSplitHosts(blkLocations,0,length,clusterMap);
                    splits.add(makeSplit(path, 0, length, splitHosts));
                }
            } else {
                //Create empty hosts array for zero length files
                splits.add(makeSplit(path, 0, length, new String[0]));
            }
        }
        // Debug - print out detail of each input split
/*        
        for( FileSplit split : splits ) {
            System.out.println("Name:" + split.getPath().getName());
            System.out.println("Start:" + split.getStart());
        }   
        System.out.println("Split Count: " + splits.size());
*/       
        return splits.toArray(new FileSplit[splits.size()]);
    }

	// Each split is read back as a single whole record by newRecordReader.
	@Override
	public RecordReader<IntWritable, BytesWritable> getRecordReader(InputSplit inputSplit,
			JobConf job, Reporter reporter) throws IOException {
	    return new newRecordReader(inputSplit, job);
	}
  }
  
  
  //checked
  // RecordReader that delivers an entire split as ONE record:
  //   key   = start offset of the split within the file
  //   value = the split's bytes, plus (for non-last splits) a 99-byte
  //           look-ahead into the next split so queries spanning a split
  //           boundary can still be matched by the mapper.
  public static class newRecordReader implements RecordReader<IntWritable, BytesWritable> {

    private FileSplit fileSplit;
    private Configuration conf;
    private boolean processed = false;            // true once the single record was emitted
    private IntWritable key = new IntWritable();  
    private BytesWritable value = new BytesWritable(); 
    private int start;                            // byte offset of this split within the file
//  private int end;
 
//*********SET*********//
    // Start offset of the file's LAST split = fileSize - splitSize
    // (983040 = 1048576 - 65536, i.e. a 1 MB file with 64 KB splits).
    // NOTE(review): hard-coded; must be updated together with the split-size
    // constants elsewhere in this file whenever the input size changes.
    private	int laststart = 983040/*1006632960*/;
    
		//*1gb  = 1073741824 b
		//*1mb  =    1048576 b
		//*32mb =   33554432 b 
		//*32kb =	   32768 b
		//*16mb =	16777216 b
		//*16kb =	   16384 b
		//*the start of 1gb's last split = 1gb -32 mb
		//								  = 1073741824 - 33554432
		
		//*the start of 1gb's last split = 1006632960(64) , 1040187392(32) , 1056964608(16)
		//*the start of 1mb's last split =    1015808(32) ,    1032192(16) 
    
    
    public newRecordReader(InputSplit inputSplit, JobConf conf) throws IOException {
        this.fileSplit = (FileSplit) inputSplit;
        this.conf = conf; 
        
        // NOTE(review): the int cast truncates offsets for files >= 2 GB.
        start = (int) fileSplit.getStart();
 //     end = (int) (start + fileSplit.getLength());   
    }
    
	// Emits the single (offset, bytes) record on the first call, then reports
	// end-of-split on every subsequent call.
	@Override
	public boolean next(IntWritable key, BytesWritable value) throws IOException {
        if (!processed) {
            byte[] contents = new byte[(int) fileSplit.getLength()+99];
            //* the longest query has 100 bytes
            //	the worst case scenario is that 1 byte is in the n split
            //	and the other 99 bytes are in the n+1 split
            //	+99 is to look ahead into 99 bytes of the n+1 split
            // so 20mb+99b
            Path file = fileSplit.getPath();
            FileSystem fs = file.getFileSystem(conf);
            FSDataInputStream in = null;
            try {
              in = fs.open(file);
              in.seek(start);            
              if(start<laststart) // = fileSize-splitSize
            	  IOUtils.readFully(in, contents, 0, contents.length);
              	  // not last split, content = splitsize+99b
              else 
            	  IOUtils.readFully(in, contents, 0, contents.length-99);
              	  // last split use normal split size as a content (no +99 overreader to next split)
              key.set(start);
              // NOTE(review): the value length is always contents.length, so for
              // the last split the 99 trailing bytes that were never read stay
              // zero yet are still part of the record -- confirm the mapper's
              // last-split handling accounts for this.
              value.set(contents, 0, contents.length);
            } finally {
              IOUtils.closeStream(in);
            }
            processed = true;
            return true;
        }
        return false;
	}

	  
	@Override
	public void close() throws IOException {
		// Nothing to release: the input stream is opened and closed inside next().
		
	}

	@Override
	public IntWritable createKey() {
		return key;
	}

	@Override
	public BytesWritable createValue() {
		return value;
	}

	// Always 0; progress is exposed through getProgress() instead.
	@Override
	public long getPos() throws IOException {
		return 0;
	}

	@Override
	public float getProgress() throws IOException {
		return processed ? 1.0f : 0.0f;
	}
  }
  
  
  /**
   * Builds and submits the ds2 search job: whole-split input format,
   * offset-based partitioner, 16 reducers, plain-text output.
   *
   * @param args args[0] = input path, args[1] = output path
   * @return 0 once the job has completed
   */
  @Override
  public int run(String[] args) throws Exception {
	LOG.info("starting");
	JobConf job = new JobConf(ds2.class);
	job.setJobName("ds2");

	// Final output types (map output types default to these as well).
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(Text.class);

	job.setMapperClass(Map.class);
	job.setPartitionerClass(AgePartitioner.class);
	job.setReducerClass(Reduce.class);
	job.setNumReduceTasks(16);

	job.setInputFormat(newFileInputFormat.class);
	job.setOutputFormat(TextOutputFormat.class);
	System.out.println("MapTasks: "+job.getNumMapTasks());

	FileInputFormat.setInputPaths(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));
	// Ship query.dat to every task through the distributed cache.
	DistributedCache.addCacheFile(new URI("./query.dat"), job);

	JobClient.runJob(job);
	LOG.info("done");
	return 0;
  }
  
///checked
  /** Entry point: runs the job through ToolRunner so generic Hadoop options are parsed. */
  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new JobConf(), new ds2(), args));
  }
}