package com.shujia.filter;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.*;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
/**
 * Demonstrates single-column value filtering on the {@code api:stu} table:
 * exact-match, regex, and substring comparators on the {@code info:clazz} column.
 */
public class Code01RegexColumnFilter {
    Connection connection;

    /** Opens the HBase connection before each test. */
    @Before
    public void getConnection(){
        // HBaseConfiguration.create() loads hbase-default.xml / hbase-site.xml;
        // a bare `new Configuration()` would silently miss every HBase default.
        Configuration configuration = HBaseConfiguration.create();

        // NOTE: host mappings for node1 / node2 must exist on this machine.
        configuration.set("hbase.zookeeper.quorum","192.168.163.100,node1,node2");
        try {
            connection  = ConnectionFactory.createConnection(configuration);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Requirement: fetch students whose class is exactly "文科一班".
     * Filtering on a single column value -> SingleColumnValueFilter.
     */
    @Test
    public void getClazz() throws IOException {
        // try-with-resources closes both the Table and the ResultScanner
        // (the original version leaked both).
        try (Table table = connection.getTable(TableName.valueOf("api:stu"))) {
            Scan scan = new Scan();

            // public SingleColumnValueFilter(final byte [] family, final byte [] qualifier,
            //                                final CompareOperator op, final byte[] value)
            SingleColumnValueFilter singleColumnValueFilter = new SingleColumnValueFilter(
                    "info".getBytes(StandardCharsets.UTF_8),
                    "clazz".getBytes(StandardCharsets.UTF_8),
                    CompareOperator.EQUAL, // comparison operator
                    // explicit UTF-8: the literal is non-ASCII, and HBase's
                    // Bytes.toBytes always encodes as UTF-8 — default-charset
                    // getBytes() would break on non-UTF-8 platforms.
                    "文科一班".getBytes(StandardCharsets.UTF_8)
            );
            // Without this, rows that have no info:clazz cell would also pass the filter.
            singleColumnValueFilter.setFilterIfMissing(true);
            scan.setFilter(singleColumnValueFilter);

            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    ResultUtil.printRes(result);
                }
            }
        }
    }

    /**
     * Requirement: fetch all liberal-arts ("文科") students, i.e. any class whose
     * name contains "文科". Uses a comparator-based SingleColumnValueFilter.
     */
    @Test
    public void getClazz2() throws IOException {
        try (Table table = connection.getTable(TableName.valueOf("api:stu"))) {
            Scan scan = new Scan();

            // public SingleColumnValueFilter(final byte [] family, final byte [] qualifier,
            //      final CompareOperator op,
            //      final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator)
            //
            // Option 1 — regex comparator. Beware: the pattern must be a real
            // Java regex; "文科*" would match "文" followed by zero-or-more "科".
            // The correct anchored prefix pattern is:
            //     new RegexStringComparator("^文科")

            // Option 2 — substring comparator: matches any value containing "文科".
            SingleColumnValueFilter singleColumnValueFilter = new SingleColumnValueFilter(
                    "info".getBytes(StandardCharsets.UTF_8),
                    "clazz".getBytes(StandardCharsets.UTF_8),
                    CompareOperator.EQUAL,
                    new SubstringComparator("文科") // substring comparator
            );
            // Skip rows that have no info:clazz cell at all.
            singleColumnValueFilter.setFilterIfMissing(true);
            scan.setFilter(singleColumnValueFilter);

            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    ResultUtil.printRes(result);
                }
            }
        }
    }

    /** Releases the HBase connection after each test. */
    @After
    public void close(){
        try {
            connection.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
