//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// FOR SPLITTING INPUT FILE INTO PARTS FOR NODES ///////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
package trunk.core;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.net.NetworkTopology;

import java.io.*;
import java.net.URI;
import java.util.ArrayList;

public class daFLongLineInputFormat extends FileInputFormat<IntWritable, BytesWritable> implements JobConfigurable {

    /** Fixed split size in bytes (64 MB). (Original comment said "16 mb" — the value is 64 MB.) */
    private static final int SPLIT_SIZE = 64 * 1024 * 1024;
    /** Block size (8 KB) handed to the record reader for per-map buffering. */
    private static final int MAP_BLOCK_SIZE = 8 * 1024;
    /** The last split may exceed SPLIT_SIZE by up to 10% instead of producing a tiny tail split. */
    private static final double SPLIT_SLOP = 1.1;

    // Length (in chars) of the longest query line found in the distributed-cache file;
    // passed to the record reader (presumably as a block-boundary overlap — TODO confirm
    // against daRecordReader).
    private int longestQueryLength = 0;

    /**
     * Scans the first distributed-cache file line by line and records the length of
     * the longest line in {@link #longestQueryLength}.
     * NOTE(review): assumes the query file is the first cache entry — verify job setup.
     *
     * @param entries the job configuration carrying the distributed-cache registrations
     */
    @Override
    public void configure(JobConf entries) {
        try {
            URI[] files = DistributedCache.getCacheFiles(entries);
            if (files == null || files.length == 0) {
                return; // no query file registered; leave longestQueryLength at 0
            }
            // try-with-resources: the original leaked this reader (it was never closed)
            try (BufferedReader reader = new BufferedReader(new FileReader(new File(files[0].getPath())))) {
                for (String query = reader.readLine(); query != null; query = reader.readLine()) {
                    if (query.length() > longestQueryLength) {
                        longestQueryLength = query.length();
                    }
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Opens the split's file and wraps it in a {@code daRecordReader} positioned at the
     * split's start/length, forwarding the query-overlap length and map block size.
     *
     * @throws IOException if the split is not a {@link FileSplit} or the file cannot be opened
     */
    @Override
    public RecordReader<IntWritable, BytesWritable> getRecordReader(InputSplit inputSplit, JobConf entries, Reporter reporter) throws IOException {
        if (!(inputSplit instanceof FileSplit)) {
            // The original returned null here, which would NPE inside the framework; fail loudly instead.
            throw new IOException("Unsupported split type: " + inputSplit.getClass().getName());
        }
        FileSplit fileSplit = (FileSplit) inputSplit;
        FSDataInputStream inputStream = fileSplit.getPath().getFileSystem(entries).open(fileSplit.getPath());
        return new daRecordReader(inputStream, (int) fileSplit.getStart(), (int) fileSplit.getLength(), longestQueryLength, MAP_BLOCK_SIZE);
    }

    /**
     * Carves every input file into fixed-size {@code SPLIT_SIZE} splits; the final split may
     * be up to {@code SPLIT_SLOP} times larger to avoid a tiny tail. Preferred host locations
     * are taken from each file's block locations. Non-splitable files become one split each;
     * zero-length files get one empty split.
     *
     * @param numSplits hint only — the actual split count is driven by SPLIT_SIZE
     * @throws IOException if an input path is a directory or the filesystem cannot be read
     */
    @Override
    public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        FileStatus[] files = listStatus(job);

        // Save the number of input files for metrics/loadgen
        job.setLong(NUM_INPUT_FILES, files.length);

        // Validate up front: directories are not acceptable inputs.
        // (The original also accumulated totalSize / goalSize / minSize here, but those
        // values were never used — dead code removed.)
        for (FileStatus file : files) {
            if (file.isDirectory()) {
                throw new IOException("Not a file: " + file.getPath());
            }
        }

        ArrayList<FileSplit> splits = new ArrayList<>(numSplits);
        NetworkTopology clusterMap = new NetworkTopology();
        for (FileStatus file : files) {
            Path path = file.getPath();
            long length = file.getLen();

            if (length == 0) {
                // Empty hosts array for zero-length files
                splits.add(makeSplit(path, 0, length, new String[0]));
                continue;
            }

            FileSystem fs = path.getFileSystem(job);
            BlockLocation[] blkLocations;
            if (file instanceof LocatedFileStatus) {
                // Block locations already fetched during listing — avoid a second namenode call
                blkLocations = ((LocatedFileStatus) file).getBlockLocations();
            } else {
                blkLocations = fs.getFileBlockLocations(file, 0, length);
            }

            if (!isSplitable(fs, path)) {
                // Non-splitable file: single split covering the whole file
                String[] splitHosts = getSplitHosts(blkLocations, 0, length, clusterMap);
                splits.add(makeSplit(path, 0, length, splitHosts));
                continue;
            }

            long bytesRemaining = length;
            // Emit full-size splits while more than SPLIT_SLOP * SPLIT_SIZE bytes remain
            while (((double) bytesRemaining) / SPLIT_SIZE > SPLIT_SLOP) {
                String[] splitHosts = getSplitHosts(blkLocations, length - bytesRemaining, SPLIT_SIZE, clusterMap);
                splits.add(makeSplit(path, length - bytesRemaining, SPLIT_SIZE, splitHosts));
                bytesRemaining -= SPLIT_SIZE;
            }
            if (bytesRemaining != 0) {
                // Final (possibly oversized, up to SPLIT_SLOP x SPLIT_SIZE) split
                String[] splitHosts = getSplitHosts(blkLocations, length - bytesRemaining, bytesRemaining, clusterMap);
                splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining, splitHosts));
            }
        }

        return splits.toArray(new FileSplit[splits.size()]);
    }

}
