package demo13;

import demo1.WordCoundJob2;
import demo1.WordCountJob;

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

/**
 * Map-only MapReduce job that cleans raw video records read from HDFS.
 *
 * <p>Each input line is a tab-separated record. Cleaning rules:
 * <ul>
 *   <li>records with fewer than 9 or more than 29 fields are dropped as malformed;</li>
 *   <li>the category field (index 3) has embedded spaces removed;</li>
 *   <li>the related-video fields (index 9 and beyond) are collapsed into a
 *       single {@code &}-joined field, so every output record has at most 10
 *       tab-separated fields.</li>
 * </ul>
 */
public class VidemClearJob {

    /**
     * Configures and submits the cleaning job, then reports success/failure.
     *
     * @param args unused; input/output paths are hard-coded below
     *             (switch to {@code args[0]}/{@code args[1]} for a configurable run)
     * @throws Exception if job setup or execution fails
     */
    public static void main(String[] args) throws Exception {
        // 1. Initialize configuration, pointing the FileSystem at the HDFS NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop10:9000");

        // 2. Create the job; setJarByClass lets Hadoop locate the jar on the cluster.
        Job job = Job.getInstance(conf);
        job.setJarByClass(VidemClearJob.class);

        // 3. Plain-text input and output formats.
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        // 4. Hard-coded HDFS input and output paths.
        TextInputFormat.addInputPath(job, new Path("/mapreduce/hive_hw"));
        TextOutputFormat.setOutputPath(job, new Path("/mapreduce/hive_hw/video_out"));

        // 5. Mapper only — this is a map-only job, so no reducer class is set.
        job.setMapperClass(VideoClearMapper.class);

        // 6. Map output key/value types (these are also the job output types,
        //    since there is no reduce phase).
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);

        // Zero reducers: mapper output is written straight to HDFS.
        job.setNumReduceTasks(0);

        // 7. Submit the job and block until it completes.
        boolean b = job.waitForCompletion(true);
        if (b) {
            System.out.println("运行成功！");
        } else {
            System.out.println("运行失败！");
        }
    }

    /**
     * Mapper that validates and normalizes one raw video record per line.
     * Emits the cleaned record as the key with a {@link NullWritable} value.
     */
    static class VideoClearMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

        /** A valid record has at least 9 fields (indexes 0..8 are fixed attributes). */
        private static final int MIN_FIELDS = 9;
        /** Upper bound on field count for a record to be considered well-formed. */
        private static final int MAX_FIELDS = 29;

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The context doubles as a tool object: besides writing output it can
            // record counters, giving visibility into how many records were seen.
            context.getCounter("my_group", "处理数据量").increment(1L);

            String[] arr = value.toString().split("\t");

            // BUG FIX: the original wrote an empty line for every malformed record
            // because context.write() ran outside the validity check. Drop them
            // instead, and count how many were dropped.
            if (arr.length < MIN_FIELDS || arr.length > MAX_FIELDS) {
                context.getCounter("my_group", "invalid").increment(1L);
                return;
            }

            // Normalize the category field by stripping embedded spaces.
            arr[3] = arr[3].replaceAll(" ", "");

            // Collapse all related-video fields (index 9 and beyond) into a single
            // '&'-joined field at index 9.
            if (arr.length > MIN_FIELDS) {
                StringBuilder related = new StringBuilder(arr[9]);
                for (int i = MIN_FIELDS + 1; i < arr.length; i++) {
                    related.append('&').append(arr[i]);
                }
                arr[9] = related.toString();
            }

            // BUG FIX: String.join("\t", arr) over the whole array re-emitted
            // fields 10+ even though they were already folded into arr[9],
            // duplicating every related-video id. Join only the first 10 fields
            // (or 9, when the record has no related videos).
            int fields = Math.min(arr.length, MIN_FIELDS + 1);
            String rel = String.join("\t", Arrays.copyOf(arr, fields));

            context.write(new Text(rel), NullWritable.get());
        }
    }
}

