package paas.storage.common.config;

import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.ComponentScan;
import paas.storage.exception.RRException;

import javax.annotation.PreDestroy;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;

/**
 * HBase connection manager.
 *
 * <p>Keeps one {@link Connection} (and optionally one Hadoop
 * {@link org.apache.hadoop.conf.Configuration}) per logical connection id, creating them lazily
 * and closing them on application shutdown.
 *
 * @author xufeng
 * @email 525207937@qq.com
 * @date 2021/1/29 15:01
 */
@org.springframework.context.annotation.Configuration
@ComponentScan("paas.storage")
public class ConnectionManager {
    private static final Logger log = LoggerFactory.getLogger(ConnectionManager.class);

    /**
     * connectionId -> live HBase connection.
     */
    private final Map<String, Connection> connectionMap = new ConcurrentHashMap<>();
    /**
     * connectionId -> Hadoop configuration.
     */
    private final Map<String, org.apache.hadoop.conf.Configuration> hadoopConfig = new ConcurrentHashMap<>();


    /**
     * Returns the HBase connection registered under the given id.
     *
     * @param connectionId logical connection id; must not be {@code null}
     * @return the cached connection, or {@code null} if none was created for this id
     * @throws IllegalArgumentException if {@code connectionId} is {@code null}
     */
    public Connection getConnection(String connectionId) {
        if (connectionId == null) {
            throw new IllegalArgumentException("error resourceid: " + connectionId);
        }
        // ConcurrentHashMap.get is atomic; the former containsKey+get pair was a redundant
        // double lookup (and not atomic as a pair).
        return connectionMap.get(connectionId);
    }


    /**
     * Returns the Hadoop configuration registered under the given id.
     *
     * @param connectionId logical connection id; must not be {@code null}
     * @return the cached configuration, or {@code null} if none was stored for this id
     * @throws IllegalArgumentException if {@code connectionId} is {@code null}
     */
    public org.apache.hadoop.conf.Configuration getHadoopConfiguration(String connectionId) {
        if (connectionId == null) {
            throw new IllegalArgumentException("error resourceid: " + connectionId);
        }
        // Single atomic lookup instead of containsKey+get.
        return hadoopConfig.get(connectionId);
    }

    /**
     * Creates (or returns the already-cached) connection for the given id.
     *
     * <p>{@code config} may carry two sub-maps: {@code "hbase"} entries are copied into the
     * HBase configuration; {@code "hadoop"} entries build a Hadoop configuration whose HDFS
     * reachability is smoke-tested before it is cached. Uses double-checked locking so only
     * one thread creates a connection per id.
     *
     * @param connectionId logical connection id; must not be {@code null}
     * @param config       optional {@code "hbase"} / {@code "hadoop"} property maps
     * @return the connection now cached under {@code connectionId}
     * @throws IllegalArgumentException if {@code connectionId} is {@code null}
     * @throws IOException              if HDFS or the HBase cluster cannot be reached
     */
    public Connection putConnection(String connectionId, Map<String, Map<String, Object>> config) throws IOException {
        if (connectionId == null) {
            throw new IllegalArgumentException("error resourceid: " + connectionId);
        }
        Connection cached = connectionMap.get(connectionId);
        if (cached != null) {
            return cached;
        }
        synchronized (this) {
            // DCL: another thread may have created it while we waited for the lock.
            cached = connectionMap.get(connectionId);
            if (cached != null) {
                return cached;
            }
            org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();

            Map<String, Object> hbase = config.get("hbase");
            // Extra HBase properties.
            if (hbase != null && !hbase.isEmpty()) {
                hbase.forEach((key, value) -> conf.set(key, value.toString()));
            }
            Map<String, Object> hadoop = config.get("hadoop");
            // Extra Hadoop properties.
            if (hadoop != null && !hadoop.isEmpty()) {
                // On Windows the Hadoop user must be supplied via this system property.
                // Guarded: the original NPE'd when the "user" key was absent.
                Object user = hadoop.get("user");
                if (user != null) {
                    System.setProperty("HADOOP_USER_NAME", user.toString());
                }
                org.apache.hadoop.conf.Configuration configuration = getConfiguration(hadoop);
                // Smoke-test HDFS reachability; try-with-resources closes the FileSystem even
                // when resolvePath throws (the original leaked it on failure).
                try (FileSystem fileSystem = createFileSystem(configuration)) {
                    fileSystem.resolvePath(new Path("/"));
                }
                hadoopConfig.put(connectionId, configuration);
            }
            ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1,
                    new BasicThreadFactory.Builder().namingPattern("example-schedule-pool-%d").daemon(true).build());

            Connection connection = null;
            try {
                connection = ConnectionFactory.createConnection(conf, executorService);
                // Smoke-test the cluster: obtaining an Admin fails fast if HBase is unreachable.
                // try-with-resources closes it even on failure (original leaked on exception).
                try (Admin admin = connection.getAdmin()) {
                    // getAdmin() succeeding is the whole check.
                }
                connectionMap.put(connectionId, connection);
                return connection;
            } catch (IOException e) {
                // Don't leak the half-created connection or its executor when validation fails.
                if (connection != null) {
                    connection.close();
                }
                executorService.shutdownNow();
                throw e;
            }
        }
    }

    /**
     * Closes every cached connection when the Spring context shuts down.
     */
    @PreDestroy
    public void doDestroy() {
        for (Map.Entry<String, Connection> entry : connectionMap.entrySet()) {
            Connection connection = entry.getValue();
            if (connection != null) {
                try {
                    connection.close();
                    log.info("delete connection succeed");
                } catch (IOException e) {
                    // Log the cause instead of swallowing it (original dropped the exception).
                    log.error("delete connection error", e);
                }
            }
        }
    }

    /**
     * Creates a Hadoop {@link FileSystem} for the default URI of the given configuration.
     *
     * <p>The caller owns the returned FileSystem and must close it.
     *
     * @param configuration Hadoop configuration carrying {@code fs.defaultFS}
     * @return the file system for the configured default URI
     * @throws IOException if the file system cannot be created
     */
    public FileSystem createFileSystem(org.apache.hadoop.conf.Configuration configuration) throws IOException {
        URI uri = FileSystem.getDefaultUri(configuration);
        return FileSystem.get(uri, configuration);
    }


    /**
     * Builds a Hadoop configuration from the given property map, forcing the HDFS
     * implementation class so shaded deployments resolve {@code hdfs://} URIs.
     *
     * @param config property map (may be {@code null})
     * @return a new configuration populated from {@code config}
     */
    private org.apache.hadoop.conf.Configuration getConfiguration(Map<String, Object> config) {
        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
        if (config != null) {
            config.forEach((key, value) -> conf.set(key, value.toString()));
            // Hoisted out of the lambda: this value is loop-invariant and was redundantly
            // re-set for every entry. Set last so it always wins, as in the original.
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        }
        return conf;
    }
}
