package com.qm.datax.helper;

import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import com.qm.datax.beans.Table;
import com.qm.datax.configuration.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static cn.hutool.core.lang.Console.log;

/**
 * @Author: wangshali
 * @CreateTime: 2024-10-31
 * @Description: Builds the DataX job JSON configurations used by the sync scripts.
 * @Source:
 * @Sink:
 * @Version: v1.0 — MySQL data is synchronized either incrementally (di) or in full (df),
 * so there are two JSON template variants for that direction;
 * synchronizing HDFS data to ClickHouse has only one.
 */

public class DataxJsonHelper {

    private static final Logger log = LoggerFactory.getLogger(DataxJsonHelper.class);

    // --- Job templates for a single-NameNode (non-HA) Hadoop cluster. ---
    // The $PLACEHOLDER tokens ($USER_NAME, $PASSWORD, $JDBCURL, $HDFS_URL, ...)
    // are substituted by the launching shell script, not by this class.
    //
    // stgConfig_df: full load (df), mysqlreader -> hdfswriter.
    // NOTE(review): its jdbcUrl array is "[$JDBCURL]" (unquoted token), unlike
    // stgConfig_di which uses "[\"$JDBCURL\"]" — hutool's lenient parser accepts
    // it, but confirm the script substitution produces valid JSON here.
    private final JSONObject stgConfig_df = JSONUtil.parseObj("{\"job\":{\"setting\":{\"speed\":{\"channel\":4}},\"content\":[{\"reader\":{\"name\":\"mysqlreader\",\"parameter\":{\"column\":[],\"connection\":[{\"jdbcUrl\":[$JDBCURL],\"table\":[]}],\"username\":\"$USER_NAME\",\"password\":\"$PASSWORD\",\"splitPk\":\"id\"}},\"writer\":{\"name\":\"hdfswriter\",\"parameter\":{\"column\":[],\"compress\":\"NONE\",\"defaultFS\":\"$HDFS_URL\",\"fieldDelimiter\":\"\\u0001\",\"fileName\":\"$TRGT_TABLE_NAME\",\"fileType\":\"text\",\"path\":\"${HIVE_DB_DIR}px_stg.db/$TRGT_TABLE_NAME/dt=$PARTITION\",\"writeMode\":\"OVERWRITE\"}}}]}}");
    // stgConfig_di: incremental load (di), mysqlreader (querySql) -> hdfswriter.
    private final JSONObject stgConfig_di = JSONUtil.parseObj("{\"job\":{\"setting\":{\"speed\":{\"channel\":4}},\"content\":[{\"reader\":{\"name\":\"mysqlreader\",\"parameter\":{\"username\":\"$USER_NAME\",\"password\":\"$PASSWORD\",\"connection\":[{\"jdbcUrl\":[\"$JDBCURL\"],\"querySql\":[]}]}},\"writer\":{\"name\":\"hdfswriter\",\"parameter\":{\"column\":[],\"compress\":\"NONE\",\"defaultFS\":\"$HDFS_URL\",\"fieldDelimiter\":\"\\u0001\",\"fileName\":\"$TRGT_TABLE_NAME\",\"fileType\":\"text\",\"path\":\"${HIVE_DB_DIR}px_stg.db/$TRGT_TABLE_NAME/dt=$PARTITION\",\"writeMode\":\"OVERWRITE\"}}}]}}");
    // dassConfig: export, hdfsreader -> clickhousewriter (drops the target
    // ClickHouse partition via preSql before writing).
    private final JSONObject dassConfig = JSONUtil.parseObj("{\"job\":{\"setting\":{\"speed\":{\"channel\":4}},\"content\":[{\"reader\":{\"name\":\"hdfsreader\",\"parameter\":{\"path\":\"${HIVE_DB_DIR}px_ads.db/$TRGT_TABLE_NAME/dt=$PARTITION\",\"defaultFS\":\"$HDFS_URL\",\"column\":[\"*\"],\"fileType\":\"text\",\"encoding\":\"UTF-8\",\"fieldDelimiter\":\"\\u0001\",\"nullFormat\":\"\\\\N\"}},\"writer\":{\"name\":\"clickhousewriter\",\"parameter\":{\"username\":\"$USER_NAME\",\"password\":\"$PASSWORD\",\"column\":[],\"preSql\":[\"ALTER TABLE $TRGT_TABLE_NAME DROP PARTITION '$CK_PARTITION'\"],\"connection\":[{\"jdbcUrl\":\"$JDBCURL\",\"table\":[\"$TRGT_TABLE_NAME\"]}]}}}]}}");

    // Hadoop HA 集群 (HA-cluster template variants, kept for reference)
//    private final JSONObject stgConfig_df = JSONUtil.parseObj("{\"job\": {\"content\": [{\"reader\": {\"name\": \"mysqlreader\",\"parameter\": {\"column\": [],\"connection\": [{\"jdbcUrl\": [],\"table\": []}],\"password\": \"\",\"splitPk\": \"\",\"username\": \"\"}},\"writer\": {\"name\": \"hdfswriter\",\"parameter\": {\"column\": [],\"compress\": \"NONE\",\"defaultFS\": \"hdfs://mycluster\",\"dfs.nameservices\": \"mycluster\",\"dfs.ha.namenodes.mycluster\": \"namenode1,namenode2\",\"dfs.namenode.rpc-address.aliDfs.namenode1\": \"hdfs://com.tstzyls-hadoop101:8020\",\"dfs.namenode.rpc-address.aliDfs.namenode2\": \"hdfs://com.tstzyls-hadoop102:8020\",\"dfs.client.failover.proxy.provider.mycluster\": \"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\",\"fieldDelimiter\": \"\\u0001\",\"fileName\": \"content\",\"fileType\": \"text\",\"path\": \"${targetdir}\",\"writeMode\": \"OVERWRITE\",\"nullFormat\": \"\"}}}],\"setting\": {\"speed\": {\"channel\": 1}}}}");
//    private final JSONObject dassConfig = JSONUtil.parseObj("{\"job\": {\"setting\": {\"speed\": {\"channel\": 1}},\"content\": [{\"reader\": {\"name\": \"hdfsreader\",\"parameter\": {\"path\": \"${exportdir}\",\"defaultFS\": \"\",\"dfs.nameservices\": \"mycluster\",\"dfs.ha.namenodes.mycluster\": \"namenode1,namenode2\",\"dfs.namenode.rpc-address.aliDfs.namenode1\": \"hdfs://com.tstzyls-hadoop101:8020\",\"dfs.namenode.rpc-address.aliDfs.namenode2\": \"hdfs://com.tstzyls-hadoop102:8020\",\"dfs.client.failover.proxy.provider.mycluster\": \"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\",\"column\": [\"*\"],\"fileType\": \"text\",\"encoding\": \"UTF-8\",\"fieldDelimiter\": \"\\u0001\",\"nullFormat\": \"\\\\N\"}},\"writer\": {\"name\": \"mysqlwriter\",\"parameter\": {\"writeMode\": \"replace\",\"username\": \"\",\"password\": \"\",\"column\": [],\"connection\": [{\"jdbcUrl\": [],\"table\": []}]}}}]}}");

    public DataxJsonHelper() {
    }

    /**
     * Fills credential/connection values from {@link Configuration} into the
     * df and dass templates. (Per the original note, this is optional — the
     * launch scripts can substitute the $PLACEHOLDER variables instead.
     * {@code stgConfig_di} is intentionally not touched here.)
     */
    public void setJsonParams() {
        // Fetch the reader/writer parameter sub-objects of both templates.
        JSONObject mysqlReaderPara = stgConfig_df.getByPath("job.content[0].reader.parameter", JSONObject.class);
        JSONObject hdfsWriterPara = stgConfig_df.getByPath("job.content[0].writer.parameter", JSONObject.class);
        JSONObject hdfsReaderPara = dassConfig.getByPath("job.content[0].reader.parameter", JSONObject.class);
        JSONObject ckWriterPara = dassConfig.getByPath("job.content[0].writer.parameter", JSONObject.class);

        // defaultFS is an HDFS reader/writer parameter. BUG FIX: the original
        // set it on the ClickHouse writer (which has no such parameter) and
        // never set it on the hdfswriter of the df job.
        hdfsReaderPara.set("defaultFS", Configuration.HDFS_URI);
        hdfsWriterPara.set("defaultFS", Configuration.HDFS_URI);

        // NOTE(review): the ClickHouse writer reuses the MySQL credentials;
        // confirm Configuration carries the intended CK account.
        mysqlReaderPara.set("username", Configuration.MYSQL_USER);
        ckWriterPara.set("username", Configuration.MYSQL_USER);

        mysqlReaderPara.set("password", Configuration.MYSQL_PASSWORD);
        ckWriterPara.set("password", Configuration.MYSQL_PASSWORD);

        // mysqlreader expects jdbcUrl as an array; clickhousewriter as a string.
        mysqlReaderPara.putByPath("connection[0].jdbcUrl[0]", Configuration.MYSQL_URL);
        ckWriterPara.putByPath("connection[0].jdbcUrl", Configuration.CK_URL);

        // Write the modified sub-objects back into their templates.
        stgConfig_df.putByPath("job.content[0].reader.parameter", mysqlReaderPara);
        stgConfig_df.putByPath("job.content[0].writer.parameter", hdfsWriterPara);
        dassConfig.putByPath("job.content[0].reader.parameter", hdfsReaderPara);
        dassConfig.putByPath("job.content[0].writer.parameter", ckWriterPara);
    }

    // Called per table during whole-database sync (kept for reference).
//    public void setTableAndColumns(Table table, int index, String migrationType) {
//        // 设置表名
//        setTable(table, index, migrationType);
//        // 设置列名
//        setColumns(table, migrationType);
//    }

    /**
     * Fills the column lists of the template matching the given direction.
     *
     * @param migrationType   "m2h" for MySQL-to-HDFS; anything else selects the
     *                        HDFS-to-ClickHouse template
     * @param synchronizeType "df" (full) or "di" (incremental); only consulted
     *                        when {@code migrationType} is "m2h"
     */
    public void setColumns(Table table, String migrationType, String synchronizeType) {
        if ("m2h".equals(migrationType) && "df".equals(synchronizeType)) {
            // Full load: reader takes plain column names, hdfswriter needs name+type.
            stgConfig_df.putByPath("job.content[0].reader.parameter.column", table.getColumnNames());
            stgConfig_df.putByPath("job.content[0].writer.parameter.column", table.getColumnNamesAndTypes());
        } else if ("m2h".equals(migrationType) && "di".equals(synchronizeType)) {
            // Incremental load: reader is driven by a query instead of a column list.
            stgConfig_di.putByPath("job.content[0].reader.parameter.connection[0].querySql", table.getQuerySql());
            stgConfig_di.putByPath("job.content[0].writer.parameter.column", table.getColumnNamesAndTypes());
        } else {
            // HDFS -> ClickHouse: reader addresses columns by index, writer by name.
            dassConfig.putByPath("job.content[0].reader.parameter.column", table.getColumnAndIndex());
            dassConfig.putByPath("job.content[0].writer.parameter.column", table.getColumnNames());
        }
    }

    /**
     * Sets the source table name on the full-load (df) template.
     *
     * @param migrationType currently unused — the df template is always the
     *                      target; kept for interface compatibility with callers
     */
    public void setTable(Table table, String migrationType) {
        stgConfig_df.putByPath("job.content[0].reader.parameter.connection[0].table[0]", table.name());
        // dassConfig.putByPath("job.content[0].writer.parameter.connection[0].table[" + 0 + "]", table.name());
    }

    /**
     * Returns the MySQL-to-HDFS template for the given sync mode.
     *
     * @param synchronizeType "df" (full) or "di" (incremental)
     * @return the matching template, or {@code null} for any other value
     *         (callers must null-check; preserved for backward compatibility)
     */
    public JSONObject getStgConfig(String synchronizeType) {
        if ("df".equals(synchronizeType)) {
            return stgConfig_df;
        } else if ("di".equals(synchronizeType)) {
            return stgConfig_di;
        }
        log.warn("Unknown synchronizeType '{}', expected 'df' or 'di'; returning null", synchronizeType);
        return null;
    }

    /** Returns the HDFS-to-ClickHouse template. */
    public JSONObject getDassConfig() {
        return dassConfig;
    }

}
