package com.feidee.fdhadoop.hbase;

import com.feidee.fdcommon.configuration.CustomConfiguration;
import com.feidee.fdhadoop.configUpdater.ConfigUpdaterCallback;
import com.feidee.fdhadoop.constant.Constant;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.ConcurrentHashMap;


/**
 * Callback invoked when the HBase HA configuration changes.
 *
 * <p>On each update it reads the currently active cluster ("active" key) from the
 * pushed config, builds a new {@link Configuration} from {@link CustomConfiguration}
 * values, creates a new {@link Connection}, verifies it is actually usable, and only
 * then publishes it via {@link HBaseUtil#setConn}. If connection creation or the
 * availability check fails, the previously published connection is left untouched.
 */
public class HBaseConfigUpdaterCallBack implements ConfigUpdaterCallback {
    protected static final Logger logger = LoggerFactory.getLogger(HBaseConfigUpdaterCallBack.class);

    /**
     * Rebuilds and swaps in the shared HBase connection for the newly active cluster.
     *
     * @param config updated configuration map; the "active" key holds the ZooKeeper
     *               quorum of the currently active cluster (falls back to
     *               {@link Constant#HBASE_HA_DEFAULT_URL} when absent or empty)
     */
    @Override
    public void configUpdated(ConcurrentHashMap<String, String> config) {
        String active = config.get("active");
        logger.info("======>hbaseHA当前活跃集群:{}", active);
        if (active == null || "".equals(active)) {
            active = Constant.HBASE_HA_DEFAULT_URL;
            logger.info("======>hbaseHA未获取到活跃集群配置，使用默认地址:{}", active);
        }

        Configuration conf = buildConfiguration(active);

        // If creating the new connection fails, bail out and keep the old one.
        Connection conn;
        try {
            logger.info("======>hbaseHA创建活跃集群连接开始~");
            conn = ConnectionFactory.createConnection(conf);
        } catch (Exception e) {
            logger.error("======>hbaseHA创建活跃集群连接失败！", e);
            return;
        }

        // createConnection only talks to ZooKeeper (reads /hbase/hbaseid) and swallows
        // failures internally, so probe the cluster explicitly: getRegionServers first
        // resolves /hbase/master and then issues a real request to the HMaster.
        // Admin is Closeable — use try-with-resources so it never leaks.
        try (Admin admin = conn.getAdmin()) {
            logger.info("======>hbaseHA调用getRegionServers方法检测连接可用性~{}", admin.getRegionServers());
        } catch (Exception e) {
            logger.error("======>hbaseHA创建活跃集群连接失败！", e);
            // The probe failed: close the just-created connection so it doesn't leak.
            closeQuietly(conn);
            return;
        }

        logger.info("======>hbaseHA创建活跃集群连接成功~");
        HBaseUtil.setConn(conn);
        logger.info("======>hbaseHA更新集群连接完成~");
    }

    /**
     * Builds the client {@link Configuration} for the given active ZooKeeper quorum,
     * pulling tunables (with defaults) from {@link CustomConfiguration}.
     */
    private static Configuration buildConfiguration(String active) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.property.clientPort", CustomConfiguration.getString("hbase.zookeeper.property.clientPort", "2181"));
        conf.set("hbase.zookeeper.quorum", active);
        conf.set("hbase.client.scanner.caching", CustomConfiguration.getString("hbase.client.scanner.caching", "100"));
        // RPC timeout.
        conf.set("hbase.rpc.timeout", CustomConfiguration.getString("hbase.rpc.timeout", "6000"));
        // Socket connect timeout; should be less than or equal to the RPC timeout.
        conf.set("ipc.socket.timeout", CustomConfiguration.getString("ipc.socket.timeout", "2000"));
        // Client retry count.
        conf.set("hbase.client.retries.number", CustomConfiguration.getString("hbase.client.retries.number", "3"));
        // Pause between retries (HBase default is 1s).
        conf.set("hbase.client.pause", CustomConfiguration.getString("hbase.client.pause", "100"));
        // ZooKeeper recovery retry count.
        conf.set("zookeeper.recovery.retry", CustomConfiguration.getString("zookeeper.recovery.retry", "1"));
        return conf;
    }

    /** Closes the connection, logging (not propagating) any failure. */
    private static void closeQuietly(Connection conn) {
        try {
            conn.close();
        } catch (Exception e) {
            logger.warn("======>hbaseHA关闭连接失败", e);
        }
    }
}
