package partialRDT;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Scanner;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import util.DataManager;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Mapper for PRDT prediction
 * @author tigerzhong
 *
 */
public class PRDTTestMapper extends MapReduceBase implements
	Mapper<LongWritable, Text, Text, Text> {
    /**
     * Ensemble of RDT models used for prediction; loaded from the second
     * DistributedCache file in {@link #configure(JobConf)}.
     */
    private static ArrayList<SkewRDTClassification> rdtModel;
    /**
     * Test dataset structure (ARFF header). Instances built in {@link #map}
     * are attached to it so Weka knows the attribute/class layout.
     */
    private static Instances testData;
    /**
     * Number of leading CSV columns that are identifiers (they form the
     * output key, not features). Overwritten from the "Data.NumSkip" job
     * parameter in configure(); the default there is 1.
     */
    private static int numSkip = 3;
    /**
     * Reusable output key (standard Hadoop pattern to avoid per-record allocation).
     */
    private Text key = new Text();
    /**
     * Reusable output value.
     */
    private Text value = new Text();

    // Statistics counter names, can be ignored
    private String[] tp = new String[3];
    private String[] fp = new String[3];
    private String[] fn = new String[3];
    private String acc = "Acc";

    /**
     * Classifies one CSV record with the averaged RDT ensemble.
     * <p>
     * Input line layout: {@code id[,id...],feature...,trueValue} where the
     * first {@code numSkip} columns form the output key and the last column
     * is the true class value.
     * <p>
     * Output format: {@code key <tab> trueValue <tab> prediction,dist_0,...,dist_k}
     *
     * @param inKey  byte offset of the line (unused)
     * @param inValue one CSV record
     * @param output collector for (key, trueValue + prediction) pairs
     * @param report used for evaluation counters
     * @throws IOException if emitting the output record fails
     */
    @Override
    public void map(LongWritable inKey, Text inValue,
	    OutputCollector<Text, Text> output, Reporter report)
	    throws IOException {
	try {
	    /* Transform, string -> instance */
	    String[] items = inValue.toString().split(",");
	    StringBuilder outKey = new StringBuilder(items[0]);
	    for (int i = 1; i < numSkip; i++) {
		outKey.append(',').append(items[i]);
	    }
	    String trueValue = items[items.length - 1];
	    double[] features = new double[testData.numAttributes()];
	    for (int i = 0; i < testData.numAttributes(); i++) {
		features[i] = Double.parseDouble(items[numSkip + i]);
	    }
	    Instance instance = new DenseInstance(1.0, features);
	    instance.setDataset(testData);
	    /* Classification: average prediction and class distribution over the ensemble */
	    double preValue = 0.0;
	    double[] dis = new double[testData.numClasses()];
	    for (SkewRDTClassification model : rdtModel) {
		preValue += model.classifyInstance(instance);
		double[] cDis = model.distributionForInstance(instance);
		for (int c = 0; c < testData.numClasses(); c++) {
		    dis[c] += cDis[c];
		}
	    }
	    preValue /= rdtModel.size();	// Average
	    for (int c = 0; c < testData.numClasses(); c++) {
		dis[c] /= rdtModel.size();
	    }
	    StringBuilder outValue = new StringBuilder();
	    outValue.append(preValue);
	    for (int c = 0; c < testData.numClasses(); c++) {
		outValue.append(',').append(dis[c]);
	    }
	    key.set(outKey.toString());
	    value.set(trueValue + "\t" + outValue);
	    /* Output format:
	     *	key <tab> true value <tab> prediction value[class,distribution]
	     */
	    output.collect(key, value);

	    // Statistics information, can be ignored.
	    // dis[0] is the averaged probability of class 0; thresholds select
	    // "predicted positive" at three different confidence cut-offs.
	    if (trueValue.equals("1")) {
		if (dis[0] < 0.995) report.incrCounter("0.995", tp[0], 1);
		else report.incrCounter("0.995", fn[0], 1);
		if (dis[0] < 0.99) report.incrCounter("0.990", tp[1], 1);
		else report.incrCounter("0.990", fn[1], 1);
		if (dis[0] < 0.985) report.incrCounter("0.985", tp[2], 1);
		else report.incrCounter("0.985", fn[2], 1);
	    }
	    if (trueValue.equals("0")) {
		if (dis[0] < 0.995) report.incrCounter("0.995", fp[0], 1);
		if (dis[0] < 0.99) report.incrCounter("0.990", fp[1], 1);
		if (dis[0] < 0.985) report.incrCounter("0.985", fp[2], 1);
	    }
	    // Exact double comparison is intentional here: class labels are
	    // small integers, so the averaged prediction matches exactly only
	    // when the ensemble is unanimous or the label is representable.
	    if (Double.parseDouble(trueValue) == preValue) report.incrCounter("Sta", acc, 1);

	} catch (Exception e) {
	    // Skip malformed/unparseable records, but surface the failure
	    // count so bad input is visible in the job counters.
	    report.incrCounter("Sta", "MapError", 1);
	    e.printStackTrace();
	}
    }

    /**
     * Loads the test-set structure and the serialized RDT ensemble from the
     * DistributedCache, and reads job parameters.
     * <p>
     * Expects {@code paths[0]} = ARFF header file and {@code paths[1]} =
     * model file of whitespace-separated {@code id modelString} pairs.
     *
     * @param job job configuration (provides cache files and "Data.NumSkip")
     * @throws IllegalStateException if the model or test data cannot be
     *         loaded; failing fast here is better than NPEs on every record
     */
    @Override
    public void configure(JobConf job) {
	try {
	    // Get data structure
	    Path[] paths = DistributedCache.getLocalCacheFiles(job);
	    testData = DataManager.getArffDataFromReader(paths[0].toString());
	    // Get parameters
	    numSkip = job.getInt("Data.NumSkip", 1);
	    // Initialize model
	    rdtModel = new ArrayList<SkewRDTClassification>();
	    job.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization,org.apache.hadoop.io.serializer.WritableSerialization");
	    DefaultStringifier<SkewRDTClassification> ds = new DefaultStringifier<SkewRDTClassification>(job, SkewRDTClassification.class);
	    // try-with-resources: the Scanner is closed even if deserialization throws
	    try (Scanner modelString = new Scanner(new File(paths[1].toString()))) {
		while (modelString.hasNext()) {
		    modelString.next();	// first token is the model id; discard
		    rdtModel.add(ds.fromString(modelString.next()));
		}
	    }

	    // Statistics information, can be ignored. Initialize the counter names.
	    for (int i = 0; i < 3; i++) {
		tp[i] = "TP-" + (i + 1);
		fp[i] = "FP-" + (i + 1);
		fn[i] = "FN-" + (i + 1);
	    }

	} catch (Exception e) {
	    // A mapper without its model/test data cannot produce anything
	    // useful; fail the task so Hadoop reports and retries it instead
	    // of silently emitting nothing (or NPE-ing per record).
	    throw new IllegalStateException("Failed to initialize PRDT test mapper", e);
	}
    }

}
