package com.briup.searchengine.handle;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.NavigableMap;


/**
 * @author adam
 * @date 2022/6/10
 * 查找每个网页的关键字
 */
public class Step3_FindKeyWord extends Configured implements Tool {

    public static class FindKeyWordMapper extends TableMapper<Text, Text> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
            String k = new String(key.get());
            //    拿到il列族低下所有的网址和值
            NavigableMap<byte[], byte[]> ilMap = value.getFamilyMap("il".getBytes());
            //    当前网页的tile
            byte[] titleBytes = value.getValue("page".getBytes(), "t".getBytes());
            byte[] value1=null;
            if (ilMap.size()>0){
                Set<Map.Entry<byte[], byte[]>> set = ilMap.entrySet();
                //获取第一个entry的值
                value1= ilMap.firstEntry().getValue();
                //如果第一个入链的文本值为空则继续往后边取
                if (value1==null||value1.length<1){
                    for (Map.Entry<byte[], byte[]> entry : set) {
                        byte[] value2 = entry.getValue();
                        if (value2!=null&&value2.length>0){
                                 value1=value2;
                                 break;
                        }
                    }
                }
                if (value1!=null){
                    context.write(new Text(k),new Text(new String(titleBytes)+new String(value1)));
                }else {
                    context.write(new Text(k),new Text(new String(titleBytes)));
                }
            }
        }


    }
    public  static  class FindKeyWordReducer extends TableReducer<Text,Text, NullWritable>{
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {

            Put put = new Put(key.toString().getBytes());
            for (Text value : values) {
                put.addColumn("page".getBytes(),"key".getBytes(),value.toString().getBytes());

            }
            context.write(NullWritable.get(),put);
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        //conf.set("hbase.zookeeper.quorum", "hadoop01:2181");
        Job job = Job.getInstance(conf, "findKeyWord");
        job.setJarByClass(this.getClass());
        //    mapper 查数据 提取title和第一个不为空的入链内容
        TableMapReduceUtil.initTableMapperJob("clean_webpage",new Scan(),FindKeyWordMapper.class,Text.class,Text.class,job);
        //    reducer 往clean_webpage 列族 page  插入新列key
        TableMapReduceUtil.initTableReducerJob("clean_webpage", FindKeyWordReducer.class,job);
       return job.waitForCompletion(true)?0:-1;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Step3_FindKeyWord(),args));
    }
}
