package com.central.datax.plugin.writer.hivewriter;

import com.alibaba.datax.common.exception.DataXException;
import com.alibaba.datax.common.plugin.RecordReceiver;
import com.alibaba.datax.common.spi.Writer;
import com.alibaba.datax.common.util.Configuration;
import com.google.common.collect.Sets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.jdbc.Utils;
import org.apache.hive.jdbc.ZooKeeperHiveClientException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.*;

/**
 * Hive writer plugin: loads records into a Hive table by writing ORC files into a
 * temporary table's HDFS location, then moving the data into the target table.
 *
 * @author Tindy
 * @date 2022/3/8
 */
public class HiveWriter extends Writer {
    public static org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    /**
     * Job-side lifecycle: validates the configuration, creates a temporary Hive
     * table whose HDFS location receives the ORC files written by the tasks,
     * splits the work into per-task file names, and on success renames the files
     * and moves the data from the temporary table into the target table.
     */
    public static class Job extends Writer.Job {
        private static final Logger LOG = LoggerFactory.getLogger(Job.class);

        private Configuration writerSliceConfig = null;

        private String defaultFS;
        private String path;
        private String fileName;
        private List<Configuration> columns;
        private String writeMode;
        // Full paths of the files written into the temporary directory.
        private HashSet<String> tmpFiles = new HashSet<String>();
        // Full paths the files are renamed to once all tasks succeed.
        private HashSet<String> endFiles = new HashSet<String>();

        private HdfsHelper hdfsHelper = null;

        private HiveHelper hiveHelper = null;
        private String table;
        private String tmpTable;

        /**
         * Produces one task configuration per writer channel, each carrying a
         * unique temporary file name; records the temporary/final path pairs so
         * {@link #post()} can rename them after all tasks finish.
         */
        @Override
        public List<Configuration> split(int mandatoryNumber) {
            LOG.info("begin do split...");
            List<Configuration> writerSplitConfigs = new ArrayList<Configuration>();
            String filePrefix = fileName;

            // Files already present under the target path, used to avoid name collisions.
            Set<String> allFiles = new HashSet<String>();
            if (hdfsHelper.isPathexists(path)) {
                allFiles.addAll(Arrays.asList(hdfsHelper.hdfsDirList(path)));
            }

            // Temporary directory the tasks actually write to.
            String storePath = buildTmpFilePath(this.path);
            // Final directory the files end up in after post().
            String endStorePath = buildFilePath();
            this.path = endStorePath;
            for (int i = 0; i < mandatoryNumber; i++) {
                Configuration splitedTaskConfig = this.writerSliceConfig.clone();

                String fileSuffix = UUID.randomUUID().toString().replace('-', '_');
                String fullFileName = String.format("%s%s%s__%s", defaultFS, storePath, filePrefix, fileSuffix);
                String endFullFileName = String.format("%s%s%s__%s", defaultFS, endStorePath, filePrefix, fileSuffix);
                // Regenerate the random suffix until the final file name is unique.
                while (allFiles.contains(endFullFileName)) {
                    fileSuffix = UUID.randomUUID().toString().replace('-', '_');
                    fullFileName = String.format("%s%s%s__%s", defaultFS, storePath, filePrefix, fileSuffix);
                    endFullFileName = String.format("%s%s%s__%s", defaultFS, endStorePath, filePrefix, fileSuffix);
                }
                allFiles.add(endFullFileName);

                // Remember both paths for the rename in post().
                this.tmpFiles.add(fullFileName);
                this.endFiles.add(endFullFileName);

                splitedTaskConfig.set(Key.FILE_NAME, fullFileName);
                LOG.info("splited write file name:[{}]", fullFileName);
                writerSplitConfigs.add(splitedTaskConfig);
            }
            LOG.info("end do split.");
            return writerSplitConfigs;
        }

        /**
         * Builds a sibling temporary directory path ("&lt;path&gt;__&lt;uuid&gt;/") that
         * does not yet exist on HDFS; files are written there first and renamed on success.
         *
         * @param userPath the configured target path
         * @return a non-existing temporary directory path ending with a separator
         */
        private String buildTmpFilePath(String userPath) {
            boolean isEndWithSeparator = endsWithSeparator(userPath);
            String tmpFilePath = formatTmpPath(userPath, isEndWithSeparator);
            // Keep regenerating until the directory does not exist yet.
            while (hdfsHelper.isPathexists(tmpFilePath)) {
                tmpFilePath = formatTmpPath(userPath, isEndWithSeparator);
            }
            return tmpFilePath;
        }

        // Formats "<path>__<random-suffix>/", stripping a trailing separator from
        // userPath first (except for the root path "/").
        private String formatTmpPath(String userPath, boolean isEndWithSeparator) {
            String tmpSuffix = UUID.randomUUID().toString().replace('-', '_');
            if (!isEndWithSeparator) {
                return String.format("%s__%s%s", userPath, tmpSuffix, IOUtils.DIR_SEPARATOR_UNIX);
            } else if ("/".equals(userPath)) {
                return String.format("%s__%s%s", userPath, tmpSuffix, IOUtils.DIR_SEPARATOR);
            }
            return String.format("%s__%s%s", userPath.substring(0, userPath.length() - 1), tmpSuffix, IOUtils.DIR_SEPARATOR);
        }

        // True when the given path ends with the platform directory separator.
        private boolean endsWithSeparator(String p) {
            switch (IOUtils.DIR_SEPARATOR) {
                case IOUtils.DIR_SEPARATOR_UNIX:
                    return p.endsWith(String.valueOf(IOUtils.DIR_SEPARATOR));
                case IOUtils.DIR_SEPARATOR_WINDOWS:
                    return p.endsWith(String.valueOf(IOUtils.DIR_SEPARATOR_WINDOWS));
                default:
                    return false;
            }
        }

        // Ensures this.path ends with '/' and returns it (the final storage path).
        private String buildFilePath() {
            if (!endsWithSeparator(this.path)) {
                this.path = this.path + IOUtils.DIR_SEPARATOR_UNIX;
            }
            return this.path;
        }

        @Override
        public void init() {
            this.writerSliceConfig = this.getPluginJobConf();
            validateParameter();
            hdfsHelper = new HdfsHelper(this.writerSliceConfig);
            hiveHelper = new HiveHelper(this.writerSliceConfig);
            this.columns = this.writerSliceConfig.getListConfiguration(Key.COLUMN);
        }

        @Override
        public void prepare() {
            this.createTmpTable();
            // Share the Job's FileSystem with the tasks (see Task.init()).
            SingleFS.getInstance().setFileSystem(this.hdfsHelper.getFileSystem());
            this.setConfig();
        }

        // Normalizes the writeMode setting and fixes the task file-name prefix.
        private void setConfig() {
            this.fileName = "tmp";
            this.writeMode = this.writerSliceConfig.getString(Key.WRITE_MODE);
            writeMode = writeMode.toLowerCase().trim();
            Set<String> supportedWriteModes = Sets.newHashSet("into", "overwrite");
            if (!supportedWriteModes.contains(writeMode)) {
                throw DataXException.asDataXException(HiveWriterErrorCode.ILLEGAL_VALUE,
                        String.format("仅支持into, overwrite两种模式, 不支持您配置的 writeMode 模式 : [%s]",
                                writeMode));
            }
            this.writerSliceConfig.set(Key.WRITE_MODE, writeMode);
        }

        /**
         * Creates the temporary Hive table and derives {@code defaultFS} and the
         * HDFS write path from its storage location
         * (e.g. {@code hdfs://host:9000/user/hive/warehouse/db/tbl}).
         */
        private void createTmpTable() {
            LOG.info("begin do create tmp table ...");
            this.table = this.writerSliceConfig.getString(Key.TABLE);
            this.tmpTable = table + "_tmp_" + UUID.randomUUID().toString().replace('-', '_');
            String location = hiveHelper.createTmpTable(this.tmpTable, this.columns);
            // "hdfs://host:port" ends at the first '/' after the scheme (index >= 8).
            int splitIndex = location.indexOf("/", 8);
            String fsFromLocation = location.substring(0, splitIndex);
            String fs = fsFromLocation;
            try {
                Map<String, String> sessionVars =
                        Utils.parseURL(this.writerSliceConfig.getString(Key.JDBC_URL)).getSessionVars();
                fs = sessionVars.getOrDefault("defaultFS", fsFromLocation);
            } catch (SQLException | ZooKeeperHiveClientException e) {
                // Fall back to the filesystem parsed from the table location instead of
                // silently leaving defaultFS null (which made split() build "null/..." names).
                LOG.warn("failed to parse session vars from jdbcUrl, falling back to [{}]", fsFromLocation, e);
            }
            this.defaultFS = fs;
            writerSliceConfig.set(Key.DEFAULT_FS, this.defaultFS);
            this.path = location.substring(splitIndex);
            LOG.info("create tmp table {} end", tmpTable);
        }

        // Ensures all required keys are present; Kerberos keys only when enabled.
        private void validateParameter() {
            boolean haveKerberos = this.writerSliceConfig.getBool(Key.HAVE_KERBEROS, false);
            if (haveKerberos) {
                this.writerSliceConfig.getNecessaryValue(Key.KERBEROS_CONF_FILE_PATH, HiveWriterErrorCode.REQUIRED_VALUE);
                this.writerSliceConfig.getNecessaryValue(Key.KERBEROS_KEYTAB_FILE_PATH, HiveWriterErrorCode.REQUIRED_VALUE);
                this.writerSliceConfig.getNecessaryValue(Key.KERBEROS_PRINCIPAL, HiveWriterErrorCode.REQUIRED_VALUE);
            }
            this.writerSliceConfig.getNecessaryValue(Key.JDBC_URL, HiveWriterErrorCode.REQUIRED_VALUE);
            this.writerSliceConfig.getNecessaryValue(Key.TABLE, HiveWriterErrorCode.REQUIRED_VALUE);
            this.writerSliceConfig.getNecessaryValue(Key.COLUMN, HiveWriterErrorCode.REQUIRED_VALUE);
            this.writerSliceConfig.getNecessaryValue(Key.WRITE_MODE, HiveWriterErrorCode.REQUIRED_VALUE);
        }

        /**
         * Runs after all tasks succeed: renames the temporary files to their final
         * paths, then moves the data from the temporary table into the target table.
         */
        @Override
        public void post() {
            hdfsHelper.renameFile(tmpFiles, endFiles);
            hiveHelper.moveData(tmpTable, table, writeMode);
            // Mark the tmp table as consumed so destroy() does not drop it.
            tmpTable = null;
        }

        @Override
        public void destroy() {
            // tmpTable is non-null only if post() did not complete (failure path).
            if (tmpTable != null) {
                hiveHelper.dropTable(tmpTable);
            }
            hiveHelper.closeConn();
            hdfsHelper.close();
        }
    }
    /**
     * Task side: writes the records it receives as a single ORC file at the
     * absolute path assigned to it by {@link Job#split(int)}.
     */
    public static class Task extends Writer.Task {
        private static final Logger LOG = LoggerFactory.getLogger(Task.class);
        private Configuration writerSliceConfig;

        // Absolute target path, e.g. hdfs://host:9000/user/hive/warehouse/db/tbl/tmp__uuid
        private String fileName;

        private HdfsHelper hdfsHelper = null;

        @Override
        public void startWrite(RecordReceiver lineReceiver) {
            LOG.info("begin do write...");
            // Parameterized logging: no String.format work when INFO is disabled.
            LOG.info("write to file : [{}]", this.fileName);
            // Write the incoming records as an ORC file.
            hdfsHelper.orcFileStartWrite(lineReceiver, this.writerSliceConfig, this.fileName,
                    this.getTaskPluginCollector());
            LOG.info("end do write");
        }

        @Override
        public void init() {
            this.writerSliceConfig = this.getPluginJobConf();

            // Already an absolute path assigned by the Job during split(),
            // e.g. hdfs://10.101.204.12:9000/user/hive/warehouse/writer.db/text/test.textfile
            this.fileName = this.writerSliceConfig.getString(Key.FILE_NAME);

            hdfsHelper = new HdfsHelper(writerSliceConfig);
            // Reuse the FileSystem the Job opened (published via SingleFS in prepare()).
            this.hdfsHelper.setFileSystem(SingleFS.getInstance().getFileSystem());
        }

        @Override
        public void destroy() {
        }
    }
}
