package com.patsnap.data.npd.dw.etl.job;

import com.patsnap.one.etl.flink.job.AbstractFlinkJobLauncher;
import com.patsnap.one.etl.tool.datasource.DataSourceAssembler;
import com.patsnap.one.etl.tool.kafka.KafkaConfig;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.io.FileInputFormat;
import org.apache.flink.api.common.io.FilePathFilter;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.FileProcessingMode;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

/**
 * Flink job that reads a text file listing from S3 (e.g.
 * /PATENT/DOCDB/LIST/docdb_list.txt) and publishes each line as a String
 * message to the configured Kafka topic, running in BATCH mode.
 */
public class CdcMessageProducerJob extends AbstractFlinkJobLauncher {

    private static final Logger LOGGER = LoggerFactory.getLogger(CdcMessageProducerJob.class);

    /** Default parallelism for the file-reading stage (previously hard-coded 50). */
    private static final int DEFAULT_READ_PARALLELISM = 50;
    /** Default parallelism for the Kafka sink stage (previously hard-coded 10). */
    private static final int DEFAULT_SINK_PARALLELISM = 10;

    public static void main(String[] args) throws Exception {
        // NOTE: "laucher" (sic) is the method name declared by AbstractFlinkJobLauncher
        // and cannot be renamed here without changing the superclass.
        new CdcMessageProducerJob().laucher(args);
    }

    /**
     * Entry point invoked by the launcher framework.
     *
     * <p>Expected parameters (merged with Apollo config):
     * <ul>
     *   <li>{@code source_parallelism} — environment parallelism (default 1)</li>
     *   <li>{@code read_parallelism} — parallelism of the file-read stage (default 50)</li>
     *   <li>{@code sink_parallelism} — parallelism of the Kafka sink (default 10)</li>
     *   <li>{@code job_name} — base job name; "_docdb_producer" is appended</li>
     *   <li>{@code s3_path} — path of the input file listing on S3</li>
     * </ul>
     *
     * @param args command-line arguments parsed via {@link ParameterTool}
     * @throws Exception if the Flink job fails to build or execute
     */
    @Override
    protected void laucher(String[] args) throws Exception {
        ParameterTool parameterTool = mergeApolloConfig(ParameterTool.fromArgs(args));

        // Parallelism configuration; getInt handles parsing + default in one step
        // (replaces Integer.parseInt(parameterTool.get(...)) and the hard-coded 50/10).
        int sourceParallelism = parameterTool.getInt("source_parallelism", 1);
        int readParallelism = parameterTool.getInt("read_parallelism", DEFAULT_READ_PARALLELISM);
        int sinkParallelism = parameterTool.getInt("sink_parallelism", DEFAULT_SINK_PARALLELISM);

        // Kafka configuration.
        Map<String, KafkaConfig> kafkaConfigMap = DataSourceAssembler.buildKafkaConfigsFromParameterTool(parameterTool);
        KafkaConfig sourceKafkaConfig = kafkaConfigMap.get("source");
        // NOTE(review): the sink below writes to the "source" Kafka config; the
        // "target" map entry was fetched but never used in the original code.
        // Confirm that producing to the "source" topic is intentional.

        // Job configuration.
        String jobName = parameterTool.get("job_name") + "_docdb_producer";

        StreamExecutionEnvironment env =
                getExecutionEnvironment(sourceParallelism, parameterTool, args, CdcMessageProducerJob.class.getName());
        enableCheckpointing(env, parameterTool);

        FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>(
                sourceKafkaConfig.getBrokerList(), sourceKafkaConfig.getTopics(), new SimpleStringSchema());

        String s3Path = parameterTool.get("s3_path");
        fromFile(env, s3Path)
                .setParallelism(readParallelism)
                .addSink(kafkaProducer)
                .setParallelism(sinkParallelism);

        env.setRuntimeMode(RuntimeExecutionMode.BATCH).execute(jobName);
    }

    /**
     * Creates the configured Kafka topic (1 partition, replication factor 1) if it
     * does not already exist.
     *
     * <p>NOTE(review): not currently called from {@link #laucher(String[])} — the
     * invocation was commented out. Kept for manual/one-off topic provisioning.
     *
     * @param config Kafka connection and topic configuration
     * @throws RuntimeException if topic listing or creation fails
     */
    private void createKafkaTopic(KafkaConfig config) {
        // Kafka admin connection configuration.
        Properties properties = new Properties();
        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, config.getBrokerList());

        try (AdminClient adminClient = AdminClient.create(properties)) {
            // Skip creation when the topic already exists.
            boolean exists = adminClient.listTopics().names().get().stream()
                    .anyMatch(topicName -> topicName.equals(config.getTopics()));
            if (exists) {
                return;
            }
            NewTopic newTopic = new NewTopic(config.getTopics(), 1, (short) 1);
            adminClient.createTopics(Collections.singleton(newTopic)).all().get();
            LOGGER.info("Topic {} created successfully.", config.getTopics());
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating.
            Thread.currentThread().interrupt();
            LOGGER.error("Interrupted while creating topic {}.", config.getTopics(), e);
            throw new RuntimeException(e);
        } catch (Exception e) {
            // Was LOGGER.info("Topic {} created failed.", e) — the exception filled
            // the placeholder and the stack trace was lost; log at ERROR with both
            // the topic name and the cause.
            LOGGER.error("Failed to create topic {}.", config.getTopics(), e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Builds a String stream from the given S3 path, enumerating nested files
     * once (PROCESS_ONCE) and excluding hidden/system files via the default
     * path filter. The enumerating source itself runs with parallelism 1.
     *
     * @param env    the stream execution environment
     * @param s3Path root path of the input file(s) on S3
     * @return a stream of text lines read from the path
     * @throws Exception if the source cannot be created
     */
    private SingleOutputStreamOperator<String> fromFile(StreamExecutionEnvironment env, String s3Path) throws Exception {
        TextInputFormat fileInputFormat = new TextInputFormat(new Path(s3Path));
        fileInputFormat.setFilesFilter(FilePathFilter.createDefaultFilter());
        fileInputFormat.setNestedFileEnumeration(true);
        return env.readFile(fileInputFormat, s3Path, FileProcessingMode.PROCESS_ONCE, 30000)
                .setParallelism(1);
    }
}
