/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.hive;

import com.facebook.airlift.log.Logger;
import com.facebook.presto.hadoop.HadoopNative;
import com.facebook.presto.hive.authentication.GenericExceptionAction;
import com.facebook.presto.hive.authentication.HdfsAuthentication;
import com.facebook.presto.hive.filesystem.ExtendedFileSystem;
import org.apache.hadoop.fs.FileSystemFinalizerService;
import com.facebook.presto.spi.Plugin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.security.UserGroupInformation;

import javax.inject.Inject;

import java.io.IOException;
import java.lang.reflect.Field;
import java.security.PrivilegedAction;

import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;

/**
 * Provides per-user access to HDFS configuration and file systems, executing
 * file-system creation under the identity supplied by {@link HdfsAuthentication}.
 *
 * <p>Each call to {@link #getFileSystem} creates a fresh {@link FileSystem}
 * via {@link FileSystem#newInstance}, which bypasses Hadoop's static
 * {@code FileSystem} cache.
 * NOTE(review): confirm these instances are tracked by
 * {@code PrestoExtendedFileSystemCache} (closed in {@link #shutdown()}) or
 * closed by callers — otherwise each call leaks a file system.
 */
public class HdfsEnvironment {
    // Kept public because external code may read it; made final so it cannot
    // be reassigned. Ideally this would be private.
    public static final Logger log = Logger.get(HdfsEnvironment.class);

    static {
        // Initialize the extended FileSystem cache (rewires FileSystem.CACHE behavior).
        HadoopExtendedFileSystemCache.initialize();
        // Load the Hadoop native libraries (enabled by default).
        HadoopNative.requireHadoopNative();
    }

    private final HdfsConfiguration hdfsConfiguration;
    private final HdfsAuthentication hdfsAuthentication;
    private final MetastoreClientConfig metastoreClientConfig;
    private final boolean verifyChecksum;

    /**
     * @param hdfsConfiguration  source of per-context Hadoop configurations
     * @param config             metastore client settings (checksum verification, native libs)
     * @param hdfsAuthentication authentication wrapper used to run actions as a given user
     * @throws NullPointerException if any argument is null
     */
    @Inject
    public HdfsEnvironment(
            @ForMetastoreHdfsEnvironment HdfsConfiguration hdfsConfiguration,
            MetastoreClientConfig config,
            HdfsAuthentication hdfsAuthentication) {
        this.hdfsConfiguration = requireNonNull(hdfsConfiguration, "hdfsConfiguration is null");
        // Null-check config once and reuse the stored reference.
        this.metastoreClientConfig = requireNonNull(config, "config is null");
        this.verifyChecksum = metastoreClientConfig.isVerifyChecksum();
        this.hdfsAuthentication = requireNonNull(hdfsAuthentication, "hdfsAuthentication is null");
        // Load the Hadoop native libraries if requested (defaults to true).
        // The static initializer above already loads them unconditionally, so
        // this call is effectively redundant but harmless.
        if (metastoreClientConfig.isRequireHadoopNative()) {
            HadoopNative.requireHadoopNative();
        }
    }

    /**
     * Returns the Hadoop configuration applicable to {@code path} for the given context.
     */
    public Configuration getConfiguration(HdfsContext context, Path path) {
        return hdfsConfiguration.getConfiguration(context, path.toUri());
    }

    /**
     * Creates a file system for {@code path} as the user carried by {@code context}.
     *
     * @throws IOException if the file system cannot be created
     */
    public ExtendedFileSystem getFileSystem(HdfsContext context, Path path)
            throws IOException {
        return getFileSystem(context.getIdentity().getUser(), path, getConfiguration(context, path));
    }

    /**
     * Creates a new file system for {@code path} as {@code user}.
     *
     * <p>A fresh instance is created (re-authenticating as {@code user}) rather
     * than reusing a cached instance that may be bound to another principal.
     *
     * @throws IOException           if the file system cannot be created, or the
     *                               calling thread is interrupted (interrupt flag restored)
     * @throws IllegalStateException if the created file system is not an {@link ExtendedFileSystem}
     */
    public ExtendedFileSystem getFileSystem(String user, Path path, Configuration configuration)
            throws IOException {
        try {
            return hdfsAuthentication.doAs(user, () -> {
                FileSystem fileSystem = FileSystem.newInstance(path.toUri(), configuration, user);
                fileSystem.setVerifyChecksum(verifyChecksum);
                checkState(fileSystem instanceof ExtendedFileSystem,
                        "expected an ExtendedFileSystem for %s but got %s", path, fileSystem.getClass().getName());
                return (ExtendedFileSystem) fileSystem;
            });
        }
        catch (IOException | RuntimeException e) {
            // Propagate unchanged: the method already declares IOException, so
            // wrapping it in RuntimeException would only hide the checked cause.
            throw e;
        }
        catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while creating file system for " + path, e);
        }
        catch (Exception e) {
            throw new IOException("Failed to create file system for " + path, e);
        }
    }

    /**
     * Closes all cached file systems. Only runs when this class was loaded by a
     * plugin classloader, to avoid tearing down shared server-wide state.
     *
     * @throws IOException if closing a file system fails
     */
    public synchronized void shutdown()
            throws IOException {
        // Shut down only if running in a plugin classloader.
        if (!getClass().getClassLoader().equals(Plugin.class.getClassLoader())) {
            log.info("===> 开始 shutdown 文件系统....");
            PrestoExtendedFileSystemCache.INSTANCE.closeAll();
        }
    }

    /**
     * Runs {@code action} as {@code user}, propagating the action's checked exception type.
     */
    public <R, E extends Exception> R doAs(String user, GenericExceptionAction<R, E> action)
            throws E {
        return hdfsAuthentication.doAs(user, action);
    }

    /**
     * Runs {@code action} as {@code user}.
     */
    public void doAs(String user, Runnable action) {
        hdfsAuthentication.doAs(user, action);
    }

    /**
     * Interrupts Hadoop's internal statistics-cleaner daemon thread via reflection.
     * Currently unreferenced; retained because shutdown() historically used it.
     * Failures are logged, not propagated (best-effort cleanup).
     */
    private static void stopFileSystemStatsThread() {
        try {
            Field field = FileSystem.Statistics.class.getDeclaredField("STATS_DATA_CLEANER");
            field.setAccessible(true);
            ((Thread) field.get(null)).interrupt();
        } catch (ReflectiveOperationException | RuntimeException e) {
            log.error(e, "Error stopping file system stats thread");
        }
    }
}
