package bigo.multiplication;

import bigo.BigODriver;
import bigo.data.ArrayVector;
import bigo.data.ArrayVectorCombo;
import bigo.data.MatrixMeta;
import bigo.data.SmartMatrix;
import bigo.data.TwoMatrices;
import bigo.data.Vector.Wrapper;
import bigo.lib.InputFormat;
import bigo.lib.OutputFormat;
import bigo.lib.OutputMatrix;
import bigo.lib.TextMatrixOutputFormat;
import bigo.multiplication.PartitionDriver.PartitionKey;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.util.ToolRunner;

/**
 * This Driver provides the second stage of the Matrix Multiplication procedure.
 * Reviewed @ 12.04
 * @author Song Liu(sl9885@bristol.ac.uk)
 */
public class CalculationDriver extends BigODriver {

    /** Input: the co-partitioned sub-matrix pairs produced by the partition stage. */
    @InputFormat
    public static Class<?> input = SequenceFileInputFormat.class;
    /** Output: the final matrix rendered as text. */
    @OutputFormat
    public static Class<?> output = TextMatrixOutputFormat.class;
    /**
     * Metadata of the output matrix C.
     * NOTE(review): held statically, so only one driver configuration can be
     * active per JVM — presumably required by the framework's annotation scan.
     */
    @OutputMatrix
    public static MatrixMeta C;

    /**
     * This Driver requires only one parameter to create: the final output matrix.
     * @param C the output matrix for the multiplication
     */
    public CalculationDriver(MatrixMeta C) {
        CalculationDriver.C = C;
    }

    /**
     * Multiplies each pair of co-partitioned sub-matrices and emits partial
     * result vectors keyed by their row (or column) index in C.
     */
    @bigo.lib.Mapper
    public static class Mapper extends org.apache.hadoop.mapreduce.Mapper<PartitionDriver.PartitionKey, TwoMatrices, IntWritable, Wrapper> {

        // Job-level flags read once in setup(): whether A/B arrive transposed,
        // and whether the product is emitted row-by-row or column-by-column.
        boolean isATransposed, isBTransposed, rowBased;

        @Override
        protected void map(PartitionKey key, TwoMatrices value, Context context) throws IOException, InterruptedException {
            SmartMatrix A = new SmartMatrix(value.matrixA, isATransposed);
            SmartMatrix B = new SmartMatrix(value.matrixB, isBTransposed);

            // The first emitted vector's index depends on orientation: rows of
            // the product start at A's column offset, columns at B's row offset.
            int i = rowBased ? A.getColumnOffset() : B.getRowOffset();
            for (ArrayVector av : A.multiply(B, rowBased)) {
                context.write(new IntWritable(i),
                        new Wrapper(av));
                i++;
            }
        }

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            super.setup(context);
            isATransposed = context.getConfiguration().getBoolean("trans.A", false);
            isBTransposed = context.getConfiguration().getBoolean("trans.B", false);
            rowBased = context.getConfiguration().getBoolean("rowbased", true);
        }
    }

    /**
     * Merges and accumulates the partial vectors generated in the Mapper into
     * a single {@link ArrayVectorCombo} per key, reducing shuffle volume.
     */
    @bigo.lib.Combiner
    public static class Combiner extends org.apache.hadoop.mapreduce.Reducer<IntWritable, Wrapper, IntWritable, Wrapper> {

        @Override
        protected void reduce(IntWritable key, Iterable<Wrapper> values,
                Context context) throws IOException, InterruptedException {
            ArrayVectorCombo combo = new ArrayVectorCombo();
            for (Wrapper w : values) {
                ArrayVector v = (ArrayVector) w.get();
                combo.addVector(v);
            }
            context.write(key, new Wrapper(combo));
        }
    }

    /**
     * Aggregates the {@link ArrayVectorCombo}s emitted by the Combiner for one key.
     * @param values wrapped combos to merge
     * @return a single combo containing every input vector
     */
    protected static ArrayVectorCombo reduceAggregate(Iterable<Wrapper> values) {
        ArrayVectorCombo combo = new ArrayVectorCombo();
        for (Wrapper w : values) {
            ArrayVectorCombo avc = (ArrayVectorCombo) w.get();
            combo.addCombo(avc);
        }
        return combo;
    }

    /**
     * Aggregates all the intermediate results from the Mapper/Combiner and
     * writes each finished vector of C to its final destination.
     */
    @bigo.lib.Reducer
    public static class Reducer extends org.apache.hadoop.mapreduce.Reducer<IntWritable, Wrapper, IntWritable, Wrapper> {

        @Override
        protected void reduce(IntWritable key, Iterable<Wrapper> values, Reducer.Context context)
                throws IOException, InterruptedException {
            ArrayVector row = reduceAggregate(values).toArrayVector();
            context.write(key, new Wrapper(row));
        }
    }

    /**
     * Routes each result-vector index to a reduce partition.
     */
    public static class Partitioner extends org.apache.hadoop.mapreduce.Partitioner<IntWritable, Wrapper> {

        @Override
        public int getPartition(IntWritable key, Wrapper value, int numPartitions) {
            // Mask the sign bit before taking the modulus: toHashCode() may be
            // negative, and a negative '%' result would be an invalid partition
            // number. Same technique as Hadoop's built-in HashPartitioner.
            return (new HashCodeBuilder().append(key.get()).toHashCode() & Integer.MAX_VALUE) % numPartitions;
        }
    }

    @Override
    public void init(Job job) {
        try {
            // NOTE(review): input path is hard-coded to the previous stage's
            // default output directory — confirm this matches PartitionDriver.
            SequenceFileInputFormat.addInputPath(job, new Path("output"));

        } catch (IOException ex) {
            Logger.getLogger(CalculationDriver.class.getName()).log(Level.SEVERE, null, ex);
            // Exit with a non-zero status so callers/schedulers see the failure
            // (previously exited with 0, signalling success on error).
            System.exit(1);
        }
    }

    @Override
    public void preRun(Job job) {
        job.setPartitionerClass(Partitioner.class);
    }

    /**
     * Demo entry point: runs the calculation stage for a 2x2 output matrix "C".
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        ToolRunner.run(conf,
                new CalculationDriver(new MatrixMeta("C", 2, 2)),
                args);

    }
}
