package partialRDT;

import java.io.IOException;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;

import util.DataManager;
import util.ModelOperator;
import weka.core.DenseInstance;
import weka.core.Instances;

/**
 * Mapper for PRDT training
 * @author tigerzhong
 *
 */
/**
 * Mapper for PRDT (partial random decision tree) training.
 *
 * <p>Each mapper buffers every input record it receives into a Weka
 * {@link Instances} set during {@code map()}, then builds a single
 * {@link SkewRDTClassification} model over the whole partition in
 * {@code close()} and writes the serialized model to HDFS, keyed by the
 * worker's IP address. No key/value pairs are emitted through the normal
 * {@code OutputCollector}; the model file is the only output.
 *
 * @author tigerzhong
 */
public class PRDTTrainMapper extends MapReduceBase implements
	Mapper<LongWritable, Text, Text, Text> {
    /** RDT model; configured in {@code configure()}, trained in {@code close()}. */
    private SkewRDTClassification rdtModel;
    /** Training data accumulated across {@code map()} calls; header loaded from the distributed cache. */
    private Instances trainData;
    /** Number of leading comma-separated fields of each input line to skip before the features. */
    private int numSkip;
    /** Maximal depth of the trees. */
    private int depth;
    /** Minimal number of instances at each leaf node. */
    private int minNum;
    /** Number of trees in the ensemble. */
    private int numTrees;
    /** Ratio of the major class (skew-handling parameter). */
    private double ratio;
    /** HDFS path of the model file written by this mapper (suffixed with the worker IP). */
    private String modelHDFSPath;
    /** Job configuration, retained so {@code close()} can serialize and write the model. */
    private JobConf localJob;

    /**
     * Buffers one CSV record into the training set.
     *
     * <p>The line is split on commas; the first {@code numSkip} fields are
     * ignored and the next {@code trainData.numAttributes()} fields are parsed
     * as doubles. Nothing is written to {@code output} here.
     *
     * @throws NumberFormatException if a feature field is not a valid double
     *         (propagates and fails the task, which is the desired behavior
     *         for malformed input)
     */
    @Override
    public void map(LongWritable inKey, Text inValue,
	    OutputCollector<Text, Text> output, Reporter report)
	    throws IOException {
	/* Split the CSV line; -1 keeps trailing empty fields. */
	String[] items = inValue.toString().split(",", -1);
	double[] features = new double[trainData.numAttributes()];
	for (int i = 0; i < trainData.numAttributes(); i++) {
	    features[i] = Double.parseDouble(items[i + numSkip]);
	}
	/* Add to the buffered dataset with unit weight. */
	trainData.add(new DenseInstance(1.0, features));
    }

    /**
     * Loads the dataset header from the distributed cache, reads the tuning
     * parameters from the job configuration, and initializes the RDT model.
     *
     * <p>Any failure is rethrown as a {@link RuntimeException}: swallowing it
     * would leave {@code trainData}/{@code rdtModel} null and cause an NPE far
     * from the real cause during {@code map()} or {@code close()}.
     */
    @Override
    public void configure(JobConf job) {
	try {
	    /* The ARFF header describing the feature space is shipped via the distributed cache. */
	    Path[] paths = DistributedCache.getLocalCacheFiles(job);
	    trainData = DataManager.getArffDataFromReader(paths[0].toString());
	    /* Tuning parameters, with the project's default values. */
	    numSkip = job.getInt("Data.NumSkip", 1);
	    minNum = job.getInt("RDT.MinNum", 3);
	    depth = job.getInt("RDT.Depth", 12);
	    ratio = job.getFloat("RDT.Ratio", (float) 1.0);
	    numTrees = job.getInt("RDT.Number", 30);
	    /* Each worker writes its own model file, disambiguated by its IP. */
	    modelHDFSPath = job.get("Model.HDFSPath") + ModelOperator.getIP() + ".rdt.model";
	    localJob = job;
	    /* Enable Java serialization so DefaultStringifier can encode the model in close(). */
	    localJob.set("io.serializations",
		    "org.apache.hadoop.io.serializer.JavaSerialization,org.apache.hadoop.io.serializer.WritableSerialization");
	    /* Initialize the model with the configured hyper-parameters. */
	    rdtModel = new SkewRDTClassification();
	    rdtModel.setDepth(depth);
	    rdtModel.setMinNum(minNum);
	    rdtModel.setNumTrees(numTrees);
	    rdtModel.setRatio(ratio);
	} catch (Exception e) {
	    /* Fail the task fast instead of limping on with a half-configured mapper. */
	    throw new RuntimeException("PRDTTrainMapper configuration failed", e);
	}
    }

    /**
     * Trains the RDT model on all buffered instances and writes the
     * serialized model to HDFS at {@code modelHDFSPath}.
     *
     * <p>Failures are rethrown as {@link IOException} so the task is marked
     * failed; previously they were swallowed, letting the job "succeed" with
     * a missing or corrupt model.
     *
     * @throws IOException if training or writing the model fails
     */
    @Override
    public void close() throws IOException {
	/* Build the classifier over every instance this mapper has seen. */
	try {
	    rdtModel.buildClassifier(trainData);
	} catch (Exception e) {
	    throw new IOException("Failed to build RDT classifier", e);
	}
	/* Serialize the model via Java serialization (enabled in configure()). */
	DefaultStringifier<SkewRDTClassification> stringifier =
		new DefaultStringifier<SkewRDTClassification>(localJob, SkewRDTClassification.class);
	String serialized = stringifier.toString(rdtModel);
	/* Write "<ip>\t<serialized model>" as a single text record to HDFS. */
	RecordWriter<Text, Text> rwFile = new TextOutputFormat<Text, Text>()
		.getRecordWriter(null, localJob, modelHDFSPath, null);
	try {
	    rwFile.write(new Text(ModelOperator.getIP()), new Text(serialized));
	} finally {
	    /* Close on every path so the HDFS file is not leaked on failure. */
	    rwFile.close(null);
	}
    }
}
