package org.egomsl.mw.benchmark;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapreduce.OrcInputFormat;
import org.egomsl.mw.HadoopRecord;
import org.egomsl.mw.mapreduce.ABaseOutputFormat;
import org.egomsl.mw.record.RecordImpl;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OrcToABaseTask {

    public static class ORCMapper extends
            Mapper<NullWritable, OrcStruct, BytesWritable, HadoopRecord> {
        public void map(NullWritable key, OrcStruct value, Context output)
                throws IOException, InterruptedException {
            ByteArrayOutputStream keyOs = new ByteArrayOutputStream();
            value.getFieldValue("appsflyer_device_id").write(new DataOutputStream(keyOs));

            ByteArrayOutputStream valueOs = new ByteArrayOutputStream();
            value.write(new DataOutputStream(valueOs));

            HadoopRecord record = new HadoopRecord(
                    new RecordImpl(keyOs.toByteArray(), valueOs.toByteArray())
            );


            output.write(record.getKey(), record);
        }
    }

    /**
     * Identity reducer: forwards every {@link HadoopRecord} grouped under a key
     * to the output unchanged. Exists so records sharing a key are delivered to
     * the output format in a single reduce call.
     */
    public static class ORCReducer extends Reducer<BytesWritable, HadoopRecord, BytesWritable, HadoopRecord> {

        @Override
        public void reduce(BytesWritable key, Iterable<HadoopRecord> values,
                           Reducer<BytesWritable, HadoopRecord, BytesWritable, HadoopRecord>.Context output)
                throws IOException, InterruptedException {
            // Emit each grouped record as-is; no aggregation is performed.
            for (HadoopRecord grouped : values) {
                output.write(key, grouped);
            }
        }
    }

    /**
     * Job driver. Wires the ORC input format through the identity map/reduce
     * pipeline into {@link ABaseOutputFormat}.
     *
     * @param args args[0] = ORC input path, args[1] = ABase output path
     * @throws Exception on job submission or execution failure
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: OrcToABaseTask <orc input path> <abase output path>");
            System.exit(2);
        }

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "OrcToABaseTask");

        job.setJarByClass(OrcToABaseTask.class);

        job.setMapperClass(ORCMapper.class);
        job.setReducerClass(ORCReducer.class);

        job.setInputFormatClass(OrcInputFormat.class);
        job.setOutputFormatClass(ABaseOutputFormat.class);

        job.setMapOutputKeyClass(BytesWritable.class);
        job.setMapOutputValueClass(HadoopRecord.class);

        job.setOutputKeyClass(BytesWritable.class);
        job.setOutputValueClass(HadoopRecord.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        ABaseOutputFormat.setOutputPath(job, args[1]);

        boolean success = job.waitForCompletion(true);
        // NOTE(review): grace period before exit — presumably lets the ABase output
        // committer / async writers finish flushing; confirm whether ABaseOutputFormat
        // actually needs this, otherwise it just delays every run by 10s.
        Thread.sleep(10 * 1000L);
        System.exit(success ? 0 : 1);
    }
}
