package cas.ihep.test;

import com.google.common.io.Closer;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;

public class HadoopMS extends Configured implements Tool{

    public int run(String[] args) throws Exception {
        if (args.length<1){
            System.err.println("Usage: HadoopMS <description file>");
            return -1;
        }
        try(Closer closer=Closer.create()) {
            JobConf conf = new JobConf(getConf(), HadoopMS.class);
            conf.setJobName("wordcount");
            //conf.setQueueName("ybj");
            Job job = Job.getInstance(conf);
            job.setInputFormatClass(TextInputFormat.class);
            job.setOutputFormatClass(NullOutputFormat.class);
            job.setUser(System.getenv("LOGNAME"));
            job.setMapperClass(ExportMapper.class);
            job.setNumReduceTasks(0);
            FileInputStream fis = closer.register(new FileInputStream(args[0]));
            InputStreamReader freader=closer.register(new InputStreamReader(fis));
            BufferedReader finput=closer.register(new BufferedReader(freader));
            String line;
            ArrayList<Path> paths=new ArrayList<>();
            while((line=finput.readLine())!=null){
                if(line.length()>0){
                    paths.add(new Path(line));
                }
            }
            FileInputFormat.setInputPaths(job, paths.toArray(new Path[paths.size()]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            job.submit();
            return 0;
        }
    }

    public static void main(String args[]) throws Exception {
        System.exit(ToolRunner.run(new HadoopMS(),args));
    }
}
