package cn.doitedu.mr;

import com.google.gson.Gson;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * @date: 2019/7/12
 * @site: www.doitedu.cn
 * @author: hunter.d 涛哥
 * @qq: 657270652
 * @description:  Parses user-profile tag JSON data and, using a multiple-outputs writer,
 *                emits two different kinds of results into two separate output directories.
 *
 *
 * After this program finishes, use the following commands to convert the data to HFiles:
 * hbase  org.apache.hadoop.hbase.mapreduce.ImportTsv \
 * -Dimporttsv.separator=@ \
 * -Dimporttsv.columns='HBASE_ROW_KEY,f:tags'  \
 * -Dimporttsv.bulk.output=/profile/output \
 * userprofile \
 * /out/profile
 *
 * hbase  org.apache.hadoop.hbase.mapreduce.ImportTsv \
 * -Dimporttsv.separator=, \
 * -Dimporttsv.columns='HBASE_ROW_KEY,f:g'  \
 * -Dimporttsv.bulk.output=/profile/index \
 * ids_index \
 * /out/index
 *
 * Then use the following commands to load the generated HFiles into the HBase tables:
 * hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles /profile/output userprofile
 *
 * hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles /profile/index ids_index
 *
 *
 *
 */
public class UserProfile2Csv {

    /**
     * Map-only mapper: parses one user-profile JSON line per record and uses
     * {@link MultipleOutputs} to emit two kinds of lines into two directories:
     * <ul>
     *   <li>{@code "gid@originalJson"} → {@code .../out3/profile/} (source for the
     *       {@code userprofile} HBase table; '@' matches {@code -Dimporttsv.separator=@})</li>
     *   <li>{@code "id,gid"} → {@code .../out3/index/} (source for the
     *       {@code ids_index} HBase table; ',' matches {@code -Dimporttsv.separator=,})</li>
     * </ul>
     */
    public static class M extends Mapper<LongWritable, Text, Text, NullWritable> {

        // Gson is stateless for this usage; one instance per mapper is sufficient.
        private final Gson gson = new Gson();
        private MultipleOutputs<Text, NullWritable> outputs = null;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            outputs = new MultipleOutputs<Text, NullWritable>(context);
        }

        /**
         * Parses one JSON line and writes the profile record plus one index record
         * per distinct id found in the nested {@code ids} structure.
         * Malformed/blank lines are skipped rather than failing the whole task.
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            String line = value.toString();
            // Skip blank lines: Gson returns null for them, which would NPE below.
            if (line == null || line.trim().isEmpty()) {
                return;
            }

            UserTags userTags = gson.fromJson(line, UserTags.class);
            if (userTags == null || userTags.getGid() == null) {
                // Malformed record — ignore it instead of killing the task with an NPE.
                return;
            }

            // Output 1: "gid@wholeJsonLine" to the profile directory.
            String profileLine = userTags.getGid() + "@" + line;
            outputs.write(new Text(profileLine), NullWritable.get(), "hdfs://spark01:8020/out3/profile/");

            HashMap<String, HashMap<String, Double>> ids = userTags.getIds();
            if (ids == null) {
                // No id section in this record — nothing to emit for the index output.
                return;
            }

            // Collect every distinct id token across all inner maps of the "ids" field.
            Set<String> idSet = new HashSet<>();
            for (Map.Entry<String, HashMap<String, Double>> entry : ids.entrySet()) {
                idSet.addAll(entry.getValue().keySet());
            }

            // Output 2: one "id,gid" line per distinct id to the index directory.
            for (String id : idSet) {
                outputs.write(new Text(id + "," + userTags.getGid()), NullWritable.get(), "hdfs://spark01:8020/out3/index/");
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            // MultipleOutputs must be closed here, or the side-output files never flush.
            outputs.close();
        }
    }

    /**
     * Configures and submits the map-only job.
     * Optional arguments: {@code args[0]} = input path, {@code args[1]} = base output path
     * (defaults keep the original hard-coded {@code data/input} / {@code data/output}).
     * Exits with a non-zero status if the job fails.
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);

        job.setJarByClass(UserProfile2Csv.class);

        job.setMapperClass(M.class);
        job.setNumReduceTasks(0); // map-only job

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // All real records go through MultipleOutputs; LazyOutputFormat stops Hadoop
        // from also creating empty part-m-* files in the base output directory.
        LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

        String input = args.length > 0 ? args[0] : "data/input";
        String output = args.length > 1 ? args[1] : "data/output";
        FileInputFormat.setInputPaths(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));

        // Propagate job success/failure to the shell instead of always exiting 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }


}
