package org.sxp.common.utils.datax.writer;

import cn.hutool.core.util.ArrayUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.json.JSONUtil;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.google.common.collect.Maps;
import org.sxp.common.dto.datax.writer.DataXHiveWriter;
import org.sxp.common.dto.datax.writer.DataXRDMSWriter;
import org.sxp.common.utils.SpringContextUtils;
import org.sxp.modules.datasource.entity.DatabaseDO;
import org.sxp.modules.datasource.service.DatabaseService;
import org.sxp.modules.dataworks.dto.SyncDTO;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;

/**
 * Hive (HDFS) writer builder — produces the "hdfswriter" section of a DataX job JSON.
 *
 * @author zhouhongfa@gz-yibo.com
 * @ClassName HiveWriter
 * @Version 1.0
 * @since 2019/7/30 23:08
 */
public class HiveWriter extends BaseWriter implements IDataxWriter {

    /** Path separator used when composing the HDFS warehouse path. */
    public static final String SPLIT_KEY = "/";

    /** Default Hive warehouse root, used when no target path is configured. */
    private static final String DEFAULT_WAREHOUSE_PATH = "/user/hive/warehouse";

    public HiveWriter(SyncDTO syncDTO) {
        super(syncDTO);
    }

    /**
     * @return the DataX plugin name this builder targets
     */
    @Override
    public String getName() {
        return "hdfswriter";
    }

    /**
     * Builds the hdfswriter configuration map ({"name": ..., "parameter": {...}})
     * from the sync DTO and the target datasource looked up via Spring.
     *
     * @return an insertion-ordered map ready to be serialized into the DataX job JSON
     * @throws IllegalStateException if the target datasource cannot be resolved
     */
    @Override
    public Map<String, Object> build() {
        DatabaseService databaseService = SpringContextUtils.getBean(DatabaseService.class);
        DatabaseDO jobDatasource = databaseService.getById(syncDTO.getTargetDb());
        if (jobDatasource == null) {
            // Fail with context instead of an anonymous NPE further down.
            throw new IllegalStateException(
                    "Target datasource not found, id=" + syncDTO.getTargetDb());
        }

        Map<String, Object> writerObj = Maps.newLinkedHashMap();
        writerObj.put("name", getName());

        Map<String, Object> parameterObj = Maps.newLinkedHashMap();
        parameterObj.put("defaultFS", jobDatasource.getHdfsUrl());
        /*
         * File format; hdfswriter only supports "text" (textfile) or "orc" (orcfile).
         * Required.
         */
        parameterObj.put("fileType", syncDTO.getTargetFileType());
        /*
         * HDFS storage path; hdfswriter writes multiple files under this directory
         * depending on the channel count. To associate the output with a Hive table,
         * use the table's storage path, e.g. with warehouse /user/hive/warehouse/,
         * database test and table hello the path is /user/hive/warehouse/test.db/hello.
         * Required.
         */
        parameterObj.put("path", resolveTargetPath(jobDatasource));
        /*
         * Base file name; a random suffix is appended per writer thread at runtime.
         * Required.
         */
        parameterObj.put("fileName", syncDTO.getTargetFileName());
        parameterObj.put("column", syncDTO.getTargetField());
        /*
         * Pre-write cleanup mode:
         *   append      - write directly under fileName, guaranteeing unique names
         *   nonConflict - fail if any file with the fileName prefix already exists
         */
        if (StrUtil.isNotBlank(syncDTO.getWriteMode())) {
            parameterObj.put("writeMode", syncDTO.getWriteMode());
        }
        /*
         * Field delimiter used when writing rows. Required.
         */
        parameterObj.put("fieldDelimiter", syncDTO.getTargetFieldDelimiter());
        /*
         * Optional settings: omit the keys entirely when unset, rather than
         * emitting "compress": null / "encoding": null into the job JSON
         * (mirrors the writeMode guard above).
         */
        if (StrUtil.isNotBlank(syncDTO.getTargetCompress())) {
            parameterObj.put("compress", syncDTO.getTargetCompress());
        }
        if (StrUtil.isNotBlank(syncDTO.getTargetEncoding())) {
            parameterObj.put("encoding", syncDTO.getTargetEncoding());
        }
        if (syncDTO.getTargetHaveKerberos() != null) {
            parameterObj.put("haveKerberos", syncDTO.getTargetHaveKerberos());
            parameterObj.put("kerberosKeytabFilePath", syncDTO.getTargetKerberosKeytabFilePath());
            parameterObj.put("kerberosPrincipal", syncDTO.getTargetKerberosPrincipal());
        }
        if (StrUtil.isNotBlank(syncDTO.getTargetHadoopConfig())) {
            parameterObj.put("hadoopConfig", JSON.parseObject(syncDTO.getTargetHadoopConfig()));
        }

        writerObj.put("parameter", parameterObj);

        return writerObj;
    }

    /**
     * Resolves the HDFS path for the target table. Falls back to the default
     * warehouse root when no path is configured (the fallback is also written
     * back onto the DTO, preserving the original side effect). If the
     * configured path does not already reference the target table, appends
     * "{dbName}.db/{table}" to it.
     */
    private String resolveTargetPath(DatabaseDO jobDatasource) {
        if (StrUtil.isBlank(syncDTO.getTargetPath())) {
            syncDTO.setTargetPath(DEFAULT_WAREHOUSE_PATH);
        }
        String path = syncDTO.getTargetPath();
        if (path.contains(SPLIT_KEY + syncDTO.getTargetTable())) {
            return path;
        }
        return path
                + (path.endsWith(SPLIT_KEY) ? "" : SPLIT_KEY)
                + jobDatasource.getDbName() + ".db/" + syncDTO.getTargetTable();
    }

    /**
     * Reverse operation of {@link #build()}: parses an existing DataX job JSON
     * (held in the DTO) and copies the writer settings back onto the DTO.
     * The table-specific suffix is stripped from the path so the DTO stores
     * only the base warehouse path.
     */
    @Override
    public void backBuild() {
        DatabaseService databaseService = SpringContextUtils.getBean(DatabaseService.class);
        DatabaseDO jobDatasource = databaseService.getById(syncDTO.getTargetDb());

        String removeSpace = StrUtil.trim(syncDTO.getDataxJson());
        /*
         * Extract the writer fragment from the job JSON via the inherited
         * pattern (declared in BaseWriter).
         */
        Matcher matcher = pattern.matcher(removeSpace);
        String value = null;
        if (matcher.find()) {
            value = matcher.group(1);
        }
        // JSONUtil.isJson is null-safe: a non-match above falls through harmlessly.
        if (JSONUtil.isJson(value)) {
            DataXHiveWriter writer = JSON.parseObject(value, DataXHiveWriter.class);
            // NOTE(review): ArrayUtil.isNotEmpty throws for non-null, non-array
            // arguments in hutool — if these getters return String, these checks
            // only work for null values. Verify the DataXHiveWriter field types;
            // StrUtil.isNotBlank may be the intended call.
            if (ArrayUtil.isNotEmpty(writer.getParameter().getFieldDelimiter())) {
                syncDTO.setTargetFieldDelimiter(writer.getParameter().getFieldDelimiter());
            }
            if (ArrayUtil.isNotEmpty(writer.getParameter().getFileType())) {
                syncDTO.setTargetFileType(writer.getParameter().getFileType());
            }
            if (ArrayUtil.isNotEmpty(writer.getParameter().getFileName())) {
                syncDTO.setTargetFileName(writer.getParameter().getFileName());
            }
            if (StrUtil.isNotBlank(writer.getParameter().getPath())) {
                // Strip "/{dbName}.db/{table}" so only the base path is stored.
                syncDTO.setTargetPath(writer.getParameter().getPath().replace(
                        SPLIT_KEY + jobDatasource.getDbName() + ".db/" + syncDTO.getTargetTable(),
                        ""));
            }
        }

        afterBackBuild();
    }
}
