package bigo.multiplication;

import bigo.BigODriver;
import bigo.data.Matrix;
import bigo.data.MatrixMeta;
import bigo.data.TwoMatrices;
import bigo.data.Vector;
import bigo.data.Vector.Wrapper;
import bigo.lib.InputFormat;
import bigo.lib.InputMatrix;
import bigo.lib.OutputFormat;
import bigo.lib.TextMatrixInputFormat;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedHashSet;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

/**
 * This driver splits the two multiplier matrices into blocks according to the
 * user's specific partition schema.
 * 
 * Reviewed @ Dec 4
 * 
 * @author Song Liu (sl9885@bristol.ac.uk)
 */
public class PartitionDriver extends BigODriver {

    // I/O format classes picked up by the BigO framework through the
    // @InputFormat / @OutputFormat annotations. Wildcard type parameters
    // avoid raw-type warnings while staying assignable from any format class.
    @InputFormat
    public static Class<?> input = TextMatrixInputFormat.class;
    @OutputFormat
    public static Class<?> output = SequenceFileOutputFormat.class;
    // Metadata of the two multipliers; kept static so mapper/reducer tasks
    // can restore them via BigODriver.loadFields(conf) in their setup().
    @InputMatrix
    public static MatrixMeta A, B;

    /**
     * Builds a partition driver over the two multipliers. The metadata is
     * stored in the annotated static fields so the framework can serialize it
     * into the job configuration.
     *
     * @param A
     *            metadata of the first (left) multiplier
     * @param B
     *            metadata of the second (right) multiplier
     */
    public PartitionDriver(MatrixMeta A, MatrixMeta B) {
        PartitionDriver.B = B;
        PartitionDriver.A = A;
    }

    /**
     * This class defines the additional information of a vector that is
     * partitioned. This class also behaves as the sorting key during the
     * reducing procedure.
     *
     * Serialized form is exactly 21 bytes: i, j, k (4-byte ints), the source
     * flag (1-byte boolean), then rowKey and partitionNO (4-byte ints). The
     * raw comparators below depend on this exact layout.
     */
    public static class PartitionKey implements
            WritableComparable<PartitionKey> {

        // which matrix ("A" or "B") this vector came from; only those two
        // values survive a write/readFields round trip (see write below)
        public String sourcePath;
        // i, j, k: block coordinates of the partition;
        // rowKey: the row of the full matrix this vector belongs to
        public int i, j, k, rowKey;
        public int partitionNO;

        // no-arg constructor required by Hadoop's Writable deserialization
        public PartitionKey() {
        }

        /**
         * Construct a PartitionKey using the following arguments
         *
         * @param sourcePath
         *            the original matrix source of this vector ("A" or "B")
         * @param i
         * @param j
         * @param k
         * @param rowKey
         *            which row it belongs to?
         */
        public PartitionKey(String sourcePath, int i, int j, int k, int rowKey) {
            this.i = i;
            this.j = j;
            this.k = k;
            this.rowKey = rowKey;
            this.sourcePath = sourcePath;

            // XOR-fold of the block coordinates. NOTE(review): the raw byte
            // comparators read i/j/k directly rather than this field —
            // confirm partitionNO is actually consumed downstream.
            this.partitionNO = i ^ j ^ k;
        }

        public void write(DataOutput out) throws IOException {
            out.writeInt(i);
            out.writeInt(j);
            out.writeInt(k);
            // TODO: may cause problem — any sourcePath other than "A" is
            // silently decoded as "B" by readFields
            out.writeBoolean(sourcePath.equals("A"));

            out.writeInt(rowKey);
            out.writeInt(partitionNO);
        }

        public void readFields(DataInput in) throws IOException {
            i = in.readInt();
            j = in.readInt();
            k = in.readInt();
            // TODO: may cause problem
            sourcePath = in.readBoolean() ? "A" : "B";

            rowKey = in.readInt();
            partitionNO = in.readInt();
        }

        /**
         * Not used, since it may cause an unknown error which gives me EOF
         * error. Sorting is done entirely through the raw byte
         * {@link Comparator} instead.
         *
         * @param o
         * @return
         */
        public int compareTo(PartitionKey o) {
            throw new UnsupportedOperationException("Not supported yet.");
        }

        /**
         * Use the raw comparator against Bytes.
         *
         * Lexicographic unsigned-byte comparison over the full 21-byte
         * record, which is effectively: i, then j, then k, then the source
         * flag (B serializes as 0, so B sorts before A), then rowKey, then
         * partitionNO. NOTE(review): this matches numeric ordering only for
         * non-negative int fields, since a negative big-endian int compares
         * as larger — confirm indices are never negative.
         */
        public static class Comparator implements RawComparator<PartitionKey> {

            public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
                    int l2) {
                // 21 = five 4-byte ints + one 1-byte boolean, the fixed
                // record size produced by PartitionKey.write
                if (l1 != 21 || l2 != 21) {
                    throw new IllegalArgumentException(
                            "Corrupted Partition Key!");
                }
                return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2);
            }

            // object-level comparison is never invoked; the framework only
            // uses the byte form above
            public int compare(PartitionKey o1, PartitionKey o2) {
                throw new UnsupportedOperationException("Not supported yet.");
            }
        }

        @Override
        public String toString() {
            return "PartitionKey{" + "i=" + i + " j=" + j + '}';
        }
    }

    /**
     * Routes records by the row-block index {@code i} only, so every block of
     * a row stripe reaches the same reducer. (Class name typo retained for
     * backward compatibility with existing configuration.)
     */
    public static class RowParititioner extends Partitioner<PartitionKey, Wrapper> {

        @Override
        public int getPartition(PartitionKey key, Wrapper value, int i) {
            // Mask off the sign bit before taking the modulo: HashCodeBuilder
            // may return a negative hash, and a negative partition number
            // makes Hadoop fail the job with "Illegal partition".
            int hash = new HashCodeBuilder().append(key.i).toHashCode();
            return (hash & Integer.MAX_VALUE) % i;
        }
    }

    /**
     * Routes records by all three block coordinates (i, j, k) so that work is
     * spread more evenly across reducers than {@link RowParititioner}.
     */
    public static class BalancedPartitioner extends Partitioner<PartitionKey, Wrapper> {

        @Override
        public int getPartition(PartitionKey key, Wrapper value, int i) {
            // Mask off the sign bit before taking the modulo: HashCodeBuilder
            // may return a negative hash, and a negative partition number
            // makes Hadoop fail the job with "Illegal partition".
            int hash = new HashCodeBuilder().append(key.i)
                    .append(key.j).append(key.k).toHashCode();
            return (hash & Integer.MAX_VALUE) % i;
        }
    }

    /**
     * The secondary sorter for the secondary sorting for regrouping the
     * vectors that have been partitioned into the same block.
     *
     * Compares only the first 12 serialized bytes of the key — the three
     * leading 4-byte ints i, j and k (see PartitionKey.write) — so all
     * records for the same block pair land in a single reduce call,
     * regardless of their source matrix or rowKey.
     */
    private static class GroupComparator implements RawComparator<PartitionKey> {

        // object-level comparison is never invoked; the framework only uses
        // the byte form below for grouping
        public int compare(PartitionKey k1, PartitionKey k2) {
            throw new UnsupportedOperationException("Not supported yet.");
        }

        public int compare(byte[] bytes1, int s1, int l1, byte[] bytes2,
                int s2, int l2) {
            // 12 = the three leading 4-byte ints (i, j, k) of the 21-byte key
            return WritableComparator.compareBytes(bytes1, s1, 12, bytes2, s2,
                    12);

        }
    }

    /**
     * Split each Vector into pieces and send them into reducers where they will
     * be organized and grouped as a new submatrix of Matrix A or Matrix B.
     */
    @bigo.lib.Mapper
    public static class Mapper
            extends org.apache.hadoop.mapreduce.Mapper<IntWritable, Wrapper, PartitionKey, Wrapper> {

        // user-supplied block layout (row/column block counts for A and B),
        // deserialized from the job configuration in setup()
        private PartitionSchema schema;

        /**
         * The main logic of the partition mapper: slices one row vector into
         * column blocks and emits each slice once per matching block of the
         * other matrix.
         *
         * @param rowIndex index of this row within the full matrix
         * @param vector the row's values
         * @param isA true when the row belongs to matrix A, false for B
         * @param context
         * @throws InterruptedException
         * @throws IOException
         */
        private void emitPartition(int rowIndex, Vector vector, boolean isA,
                Context context) throws InterruptedException, IOException {
            int m = (isA ? schema.numRowBlocksA : schema.numRowBlocksB);
            // see how large the row stride we should take
            int rowStride = isA ? A.numRows / m : B.numRows / m;

            // if the rows are beyond the largest section that we can have,
            // the put it into the last section.
            // NOTE(review): if numRows < m then rowStride is 0 and this
            // division throws ArithmeticException — confirm the schema never
            // asks for more blocks than there are rows.
            int i = rowIndex / rowStride >= m ? m - 1 : rowIndex / rowStride;

            // see how large the column we should take
            int n = (isA ? schema.numColBlocksA : schema.numColBlocksB);

            // NOTE(review): same hazard — colStride is 0 (division by zero
            // below) whenever vector.length() < n.
            int colStride = vector.length() / n;

            // split the vector into piece accroding to the column stride.
            // please note that the last group of the vector split may
            // contains all the rest elemetns that the vector has.

            for (int colIndex = 0; colIndex < vector.length()
                    && colIndex / colStride < n; colIndex += colStride) {

                // deal with all the rest, if it is the last chance: the final
                // slice absorbs any remainder elements of the vector
                Vector sub = vector.subVector(colIndex,
                        colIndex / colStride == n - 1 ? vector.length()
                        : colIndex + colStride);

                // if transposed, reverse the key which will be used to indicate
                // the sub matrix in the final matrix
                //TODO: why not use the state above? test if n-1?
                int ni = i, nk = colIndex / colStride;
                if ((isA && A.isTransposed) || (!isA && B.isTransposed)) {
                    ni = colIndex / colStride;
                    nk = i;
                }

                // see how many time the block that the vector belongs to has to
                // be
                // duplicated, for this point, we have to consider about whether
                // these
                // matrices have been transposed
                int numDuplicates = isA ? (B.isTransposed ? schema.numRowBlocksB
                        : schema.numColBlocksB)
                        : (A.isTransposed ? schema.numColBlocksA
                        : schema.numRowBlocksA);
                // make duplicates: j runs over the free coordinate of the
                // other matrix, so each slice meets every block it multiplies
                for (int j = 0; j < numDuplicates; j++) {

                    if (isA) {
                        context.write(
                                new PartitionKey("A", ni, j, nk, rowIndex),
                                new Wrapper(sub));
                    } else {
                        context.write(
                                new PartitionKey("B", j, nk, ni, rowIndex),
                                new Wrapper(sub));
                    }
                }
            }
        }

        @Override
        protected void map(IntWritable key, Wrapper value, Context context)
                throws IOException, InterruptedException {
            int rowIndex = key.get();
            Vector vector = (Vector) value.get();
            // identify the source matrix by the name of the directory the
            // input split lives in — assumes MatrixMeta.path equals that
            // directory name (TODO confirm against the input layout)
            String sourcePath = ((FileSplit) context.getInputSplit()).getPath().getParent().getName();

            // if we are only dealing with the same matrix, simply duplicate
            // them
            if (!A.path.equals(B.path)) {
                boolean isA = sourcePath.equals(A.path);
                emitPartition(rowIndex, vector, isA, context);
            } else {
                // by avoiding reading the input matrix twice,
                // here we duplicate a single matrix by emitting it twice
                emitPartition(rowIndex, vector, true, context);
                emitPartition(rowIndex, vector, false, context);
            }

        }

        @Override
        protected void setup(Context context) throws IOException,
                InterruptedException {
            // restore the annotated static fields (A, B, ...) and the user's
            // partition schema serialized into the job configuration
            BigODriver.loadFields(context.getConfiguration());
            schema = PartitionSchema.loadFromConfiguration(context.getConfiguration());
        }
    }

    /**
     * Groups the vector slices delivered to this reducer by their partition
     * key, rebuilding one sub-matrix of A and one of B per group, and emits
     * them together as a {@link TwoMatrices} pair ready for multiplication.
     */
    @bigo.lib.Reducer
    public static class Reducer
            extends org.apache.hadoop.mapreduce.Reducer<PartitionKey, Wrapper, PartitionKey, TwoMatrices> {

        @Override
        protected void reduce(PartitionKey key, Iterable<Wrapper> values,
                Context context) throws IOException, InterruptedException {
            Matrix subA = null, subB = null;

            for (Wrapper wrapper : values) {
                Vector v = (Vector) wrapper.get();
                if (key.sourcePath.equals("A")) {
                    if (subA == null) {
                        // lazily create the sub matrix, remembering the row
                        // of the full matrix it starts at
                        subA = new Matrix(key.rowKey);
                    }
                    subA.addRowVector(v);
                } else {
                    if (subB == null) {
                        // create a sub matrix with the row it starts at
                        subB = new Matrix(key.rowKey);
                    }
                    subB.addRowVector(v);
                }
            }

            // a block pair is only worth multiplying when both operands are
            // present; a missing side contributes nothing to the product
            if (subA != null && subB != null) {
                context.write(key, new TwoMatrices(subA, subB));
            }
        }

        @Override
        protected void setup(Context context) throws IOException,
                InterruptedException {
            // restore the annotated static fields (A, B, ...) serialized into
            // the job configuration by the driver
            BigODriver.loadFields(context.getConfiguration());
        }
    }

    @Override
    public void init(Job job) {
        // intermediate results of the partition phase land in the "output"
        // folder, to be consumed by the subsequent multiplication job
        Path intermediate = new Path("output");
        SequenceFileOutputFormat.setOutputPath(job, intermediate);
    }

    /**
     * Configures comparators, partitioner and output classes for the
     * partition job, and de-duplicates the input folders.
     */
    @Override
    public void preRun(Job job) {
        // alternative: RowParititioner distributes by row stripe only
        job.setPartitionerClass(BalancedPartitioner.class);
        // map output value (Wrapper) differs from the final reduce output
        // value (TwoMatrices), so both must be declared explicitly
        job.setOutputKeyClass(PartitionKey.class);
        job.setMapOutputValueClass(Wrapper.class);
        job.setOutputValueClass(TwoMatrices.class);

        // sort on the full 21-byte key, but group reduce calls on its first
        // 12 bytes (the i/j/k block coordinates) only
        job.setSortComparatorClass(PartitionKey.Comparator.class);
        job.setGroupingComparatorClass(GroupComparator.class);

        // Remove the duplicate matrix folders: when A and B share a path the
        // partition mapper emits the matrix twice itself, so reading the
        // folder twice would double the input. LinkedHashSet keeps the
        // original path order, so the job setup stays deterministic.
        LinkedHashSet<Path> set = new LinkedHashSet<Path>();
        for (Path p : FileInputFormat.getInputPaths(job)) {
            set.add(p);
        }
        try {
            FileInputFormat.setInputPaths(job, set.toArray(new Path[0]));
        } catch (IOException e) {
            // Failing here leaves the job silently misconfigured, so surface
            // the error instead of just printing the stack trace.
            throw new IllegalStateException(
                    "Could not set de-duplicated input paths", e);
        }
    }

    /**
     * Demo entry point: runs the partition job on two sample matrices "A"
     * and "B" (the second flagged as transposed).
     */
    public static void main(String[] args) throws Exception {
        MatrixMeta left = new MatrixMeta("A", 2, 2, false);
        MatrixMeta right = new MatrixMeta("B", 2, 2, true);
        ToolRunner.run(new Configuration(), new PartitionDriver(left, right), args);
    }
}
