package com.jc.test;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Iterator;

@SuppressWarnings("ALL")
public class RecordCount implements Tool {

    public static final Logger logger = LoggerFactory.getLogger(RecordCount.class);

    // Configuration injected by ToolRunner through setConf().
    Configuration configuration;

    /**
     * Stores the configuration supplied by the framework.
     *
     * <p>Bug fix: the previous version discarded the incoming {@code conf} and
     * replaced it with a fresh {@code new Configuration()}, which silently
     * dropped every generic option ({@code -D}, {@code -fs}, {@code -jt}, ...)
     * that {@link ToolRunner} had already parsed into it.
     */
    public void setConf(Configuration conf) {
        this.configuration = conf;
    }

    public Configuration getConf() {
        return configuration;
    }

    /**
     * Mapper for the new {@code org.apache.hadoop.mapreduce} API (0.20.2+).
     *
     * <p>With {@link TextInputFormat} each {@code value} is one input line, so
     * the emitted count is normally 1 per call; all counts are written under
     * the single key {@code "count"} so the reducer can sum the grand total.
     */
    public static class MyMap extends Mapper<Object, Text, Text, IntWritable> {

        // Reused writables — avoids allocating two objects per input record.
        private static final Text COUNT_KEY = new Text("count");
        private final IntWritable count = new IntWritable();

        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // Convert the raw record to a String and count its lines.
            count.set(value.toString().split("\n").length);
            context.write(COUNT_KEY, count);
        }

    }

    /**
     * Reducer (also used as the combiner): sums the per-record counts under
     * each key, producing the total number of records.
     */
    public static class MyReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable partial : values) {
                sum += partial.get(); // per-split record count
            }
            context.write(key, new IntWritable(sum)); // total record count
        }

    }

    /**
     * Configures and submits the record-counting MapReduce job.
     *
     * @param args {@code args[0]} = input path, {@code args[1]} = output path
     * @return 0 on success, 1 on job failure, 2 on missing arguments
     * @throws Exception if job submission or execution fails
     */
    public int run(String[] args) throws Exception {
        // Guard against ArrayIndexOutOfBoundsException on missing arguments.
        if (args == null || args.length < 2) {
            System.err.println("Usage: RecordCount <input path> <output path>");
            return 2;
        }

        // new Job(Configuration) is deprecated in Hadoop 2.x but kept here for
        // the 0.20.2 API this class targets (Job.getInstance is unavailable).
        Job job = new Job(getConf());
        job.setJarByClass(RecordCount.class);
        job.setJobName("recordcount"); // was "avgscore", a copy-paste leftover
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(MyMap.class);
        // Summation is associative and commutative, so the reducer can safely
        // double as the combiner to shrink shuffle traffic.
        job.setCombinerClass(MyReduce.class);
        job.setReducerClass(MyReduce.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));   // input file path
        FileOutputFormat.setOutputPath(job, new Path(args[1])); // output dir (must not pre-exist)
        boolean success = job.waitForCompletion(true);

        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // 1. Delete any previous output directory — MapReduce refuses to
        //    overwrite an existing output path.
        String out = "/user/jevoncode/sqlout/data_20180124.sql_out";
        HdfsTest.delete(out);

        // 2. HDFS permissions that may need granting beforehand:
        //      ./bin/hadoop fs -chmod 777 /user
        //      ./bin/hadoop fs -chmod 777 /user/jevoncode/sqlin/data_20180124.sql
        //      ./bin/hadoop fs -chmod 777 /tmp   (needed for temporary files)

        // 3. Submit the job via ToolRunner so generic options are honored
        //    (requires the setConf fix above to actually take effect).
        args = new String[]{"/user/jevoncode/sqlin/data_20180124.sql", out};
        int ret = ToolRunner.run(new RecordCount(), args);
        System.exit(ret);
    }


}
