package com.cloudputing.hbase.mapreduce.wiki;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

import com.cloudputing.common.util.EJob;

public class MapreduceRead {
    
    public static void read() throws IOException, InterruptedException, ClassNotFoundException
    {
        // Add these statements. XXX
//        File jarFile = EJob.createTempJar("target/classes");
//        EJob.addClasspath("D:/PAUL/WORK/WORK-SPACES/TEST1/cloudputing/src/main/resources");
//        ClassLoader classLoader = EJob.getClassLoader();
//        Thread.currentThread().setContextClassLoader(classLoader);

    	File jarFile = EJob.createTempJar("D:/PAUL/WORK/WORK-SPACES/TEST1/cloudputing/target/classes");
        Configuration config = HBaseConfiguration.create();
//        addTmpJar("file:/D:/PAUL/WORK/WORK-SPACES/TEST1/cloudputing/target/bigdata-1.0.jar",config);
        
        System.setProperty("path.separator", ":");
        Job job = new Job(config, "ExampleRead");
        // And add this statement. XXX
//        ((JobConf) job.getConfiguration()).setJar(jarFile.toString());

//        TableMapReduceUtil.addDependencyJars(job);
//        TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
//                MapreduceRead.class,MyMapper.class);
        
        job.setJarByClass(MapreduceRead.class);     // class that contains mapper
        ((JobConf)job.getConfiguration()).setJar(jarFile.getAbsolutePath());
        
        Scan scan = new Scan();
        scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
        scan.setCacheBlocks(false);  // don't set to true for MR jobs
        // set other scan attrs
        
        TableMapReduceUtil.initTableMapperJob(
                "wiki",        // input HBase table name
                scan,             // Scan instance to control CF and attribute selection
                MapreduceRead.MyMapper.class,   // mapper
                null,             // mapper output key 
                null,             // mapper output value
                job);
        job.setOutputFormatClass(NullOutputFormat.class);   // because we aren't emitting anything from mapper
        
//        DistributedCache.addFileToClassPath(new Path("hdfs://node.tracker1:9000/user/root/lib/stat-analysis-mapred-1.0-SNAPSHOT.jar"),job.getConfiguration());
        
        boolean b = job.waitForCompletion(true);
        if (!b) {
            throw new IOException("error with job!");
        }
        
    }
    
    
    public static class MyMapper extends TableMapper<Text, Text> {

        public void map(ImmutableBytesWritable row, Result value,
                Context context) throws InterruptedException, IOException {
            String val1 = getValue(value.getValue(Bytes.toBytes("text"), Bytes.toBytes("qual1")));
            String val2 = getValue(value.getValue(Bytes.toBytes("text"), Bytes.toBytes("qual2")));
            System.out.println(val1 + " -- " + val2);
        }
        
        private String getValue(byte [] value)
        {
            return value == null? "null" : new String(value);
        }
    } 

}