package regression.MR;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

import regression.model.AbstractRegression;
import utils.HadoopUtil;
/**
 * Reducer for gradient
 * @author tigerzhong
 *
 */
public class GradientReducer extends MapReduceBase implements
	Reducer<Text, Text, Text, Text> {
    /**
     * Number of classes, read from "Data.NumClasses" (defaults to 1)
     */
    private int numClasses;
    /**
     * Regression model, loaded from the distributed cache in configure()
     */
    AbstractRegression regression;
    /**
     * Reusable output value; avoids allocating a new Text per reduce call
     */
    private Text tVal = new Text();

    /**
     * Collects the partial gradient vectors for one parameter index, asks the
     * regression model to update its parameters, and emits the updated
     * parameters as a tab-separated string.
     *
     * @param inKey    parameter index as a numeric string
     * @param inValues comma-separated partial-derivative vectors from the mappers
     * @param output   collector receiving (index, updated parameters)
     * @param report   progress reporter (unused)
     * @throws IOException if emitting the output fails
     */
    @Override
    public void reduce(Text inKey, Iterator<Text> inValues,
	    OutputCollector<Text, Text> output, Reporter report)
	    throws IOException {
	List<double[]> devList = new ArrayList<double[]>();
	while (inValues.hasNext()) {
	    String[] items = inValues.next().toString().split(",", -1);
	    // NOTE(review): the array is sized by items.length but only the
	    // first numClasses entries are parsed; this assumes every record
	    // carries at least numClasses fields — TODO confirm against the
	    // mapper's output format.
	    double[] devs = new double[items.length];
	    for (int i = 0; i < numClasses; i++) {
		devs[i] = Double.parseDouble(items[i]);
	    }
	    devList.add(devs);
	}
	double[] parameters = regression.update(Integer.parseInt(inKey.toString()), devList);
	// StringBuilder instead of String += in a loop (avoids O(n^2) copying);
	// the trailing tab is kept to preserve the original output format.
	StringBuilder outValue = new StringBuilder();
	for (int j = 0; j < numClasses; j++) {
	    outValue.append(parameters[j]).append('\t');
	}
	tVal.set(outValue.toString());
	output.collect(inKey, tVal);
    }

    /**
     * Reads the class count from the job configuration and loads the
     * regression model from the first distributed-cache file.
     *
     * @param job the job configuration
     */
    @Override
    public void configure(JobConf job) {
	numClasses = job.getInt("Data.NumClasses", 1);
	try {
	    Path[] paths = DistributedCache.getLocalCacheFiles(job);
	    regression = HadoopUtil.HDFSLoadModel(job, paths[0].toString());
	} catch (IOException e) {
	    // Fail fast with the cause preserved: the original printStackTrace()
	    // swallowed the error and left `regression` null, producing an
	    // opaque NullPointerException on the first reduce() call instead.
	    throw new RuntimeException(
		    "Failed to load regression model from distributed cache", e);
	}
    }
}
