package com.bigdata.hbasedemo.consumer;

import com.bigdata.hbasedemo.producer.Commons;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.io.IOException;

/**
 * Flink sink that writes comma-separated records into HBase.
 *
 * <p>Each record is expected as {@code "rowKey, temperature, operationData"}.
 * Writes go through a {@link BufferedMutator} with a 1 MB write buffer and an
 * additional explicit flush every {@link #FLUSH_INTERVAL} records.
 */
public class HbaseWriter extends RichSinkFunction<String> {

    private static final Logger logger = LogManager.getLogger(HbaseWriter.class);

    /** Column family all values are written to. */
    private static final String COLUMN_FAMILY = "cf1";

    /** Explicitly flush the mutator every this many records. */
    private static final int FLUSH_INTERVAL = 500;

    // Per-subtask state. These must NOT be static: with a parallel sink, several
    // subtask instances can live in one JVM and would otherwise race on a shared
    // connection/mutator/counter. They are transient because Flink serializes the
    // function instance for distribution; they are (re)created in open().
    private transient Connection connection;
    private transient BufferedMutator mutator;
    private transient int count;

    /**
     * Opens the HBase connection, creates the target table if missing, and
     * prepares the buffered mutator.
     *
     * @param parameters Flink configuration (unused here)
     * @throws Exception if the connection or table setup fails — propagated so
     *         the task fails fast instead of NPE-ing later on a null connection
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
        config.set("hbase.master", Commons.EG_HOST + ":16000");
        config.set(HConstants.ZOOKEEPER_QUORUM, Commons.EG_HOST);
        config.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181");
        config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 30000);
        config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 30000);

        // Do not swallow connection failures: let open() fail the task.
        connection = ConnectionFactory.createConnection(config);

        // Create the target table if it does not exist yet.
        TableName tableName = TableName.valueOf(Commons.EG_TABLE_NAME);
        // try-with-resources: Admin is a resource and must be closed.
        try (Admin admin = connection.getAdmin()) {
            if (!admin.tableExists(tableName)) {
                // Modern (non-deprecated) table creation via descriptor builders.
                TableDescriptorBuilder tableDescriptorBuilder =
                        TableDescriptorBuilder.newBuilder(tableName);
                tableDescriptorBuilder.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.of(COLUMN_FAMILY));
                admin.createTable(tableDescriptorBuilder.build());
                logger.info("The table {} created", tableName.getNameAsString());
            } else {
                logger.info("The table {} already exists", tableName.getNameAsString());
            }
        }

        BufferedMutatorParams params = new BufferedMutatorParams(tableName);
        params.writeBufferSize(1024 * 1024); // 1 MB client-side write buffer
        mutator = connection.getBufferedMutator(params);
        count = 0;
    }

    /**
     * Buffers one record as an HBase {@code Put}; flushes every
     * {@link #FLUSH_INTERVAL} records.
     *
     * @param value record in the form {@code "rowKey, temperature, operationData"}
     * @param context sink context (unused)
     * @throws Exception if the mutation or flush fails
     */
    @Override
    public void invoke(String value, Context context) throws Exception {
        String[] arr = value.split(", ");
        // Guard against malformed input instead of failing the whole job with
        // an ArrayIndexOutOfBoundsException.
        if (arr.length < 3) {
            logger.warn("Skipping malformed record: {}", value);
            return;
        }
        Put put = new Put(Bytes.toBytes(arr[0]));
        put.addColumn(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes("temperature"), Bytes.toBytes(arr[1]));
        put.addColumn(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes("operationData"), Bytes.toBytes(arr[2]));
        mutator.mutate(put);

        // Increment BEFORE the threshold check and the log, so the flush fires on
        // exactly the 500th record (not the 501st) and the logged count is accurate.
        count++;
        logger.info("写入 {} 条", count);
        if (count >= FLUSH_INTERVAL) {
            mutator.flush();
            logger.info("写入刷新一次");
            count = 0;
        }
    }

    /**
     * Releases HBase resources. BufferedMutator.close() flushes any pending
     * mutations before closing, so no data is dropped.
     */
    @Override
    public void close() throws Exception {
        if (mutator != null) {
            mutator.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
}

