package cn.gwm.flink.streaming.task;

import cn.gwm.flink.streaming.beans.BeanSource;
import cn.gwm.flink.streaming.beans.GwmKafkaJson;
import cn.gwm.flink.streaming.beans.SourceHbaseBean;
import cn.gwm.flink.streaming.constant.BaseFields;
import cn.gwm.flink.streaming.constant.FaultToleranceConstant;
import cn.gwm.flink.streaming.ods.model.OdsOrcVectorizer;
import cn.gwm.flink.streaming.ods.model.OdsSignalSource;
import cn.gwm.flink.streaming.ods.model.OdsSignalSourceVectorizer;
import cn.gwm.flink.streaming.sink.kafka.KafkaSinkProducer;
import cn.gwm.utils.ConfigLoader;
import cn.gwm.utils.HiveStringUtil;
import cn.gwm.utils.StringUtil;
import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.date.DatePattern;
import cn.hutool.core.date.DateUtil;
import cn.hutool.json.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.Encoder;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.orc.writer.OrcBulkWriterFactory;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.BasePathBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * 定义所有job的父类，所有的业务主题都需要继承自该类
 * <p>
 * 需要定义的方法：
 * 1）抽象出来初始化flink流式开发环境的方法
 * 2）初始化kafka消费者实例，消费kafka数据，返回消费到的数据
 * 3）抽取数据实时写入到hdfs的逻辑
 */
@Slf4j
public abstract class BaseTask {
    /**
     * Creates and configures the shared Flink streaming environment for a job.
     * <p>
     * Sets event-time semantics, a 30-second exactly-once checkpoint cycle backed by
     * RocksDB with checkpoints stored on HDFS, and a fixed-delay restart strategy
     * (3 attempts, 10 seconds apart).
     *
     * @param taskName used as the final path segment of the checkpoint directory
     * @return the configured {@link StreamExecutionEnvironment}
     */
    protected static StreamExecutionEnvironment getEnv(String taskName) {
        // Local runs write checkpoints to HDFS; impersonate root to avoid permission errors.
        System.setProperty("HADOOP_USER_NAME", "root");
        // NOTE(review): this configuration is never passed to getExecutionEnvironment,
        // so the REST port setting currently has no effect — confirm whether it is still needed.
        org.apache.flink.configuration.Configuration configuration = new org.apache.flink.configuration.Configuration();
        configuration.setInteger(RestOptions.PORT, 8081);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Process records by event time (terminalTime).
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Register the global job parameters so operators can read them.
        env.getConfig().setGlobalJobParameters(ConfigLoader.parameterTool);

        // Parallelism is intentionally left to the deployment; avoid hard-coding it here.

        // Checkpoint every 30 s (recommended range: 1-5 minutes).
        env.enableCheckpointing(30 * 1000);
        // Exactly-once checkpointing semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Minimum pause between two checkpoints so they are not triggered back to back.
        // BUG FIX: the original called setCheckpointInterval(1000L) here, which silently
        // overrode the 30 s interval configured above down to 1 s; the surrounding comment
        // clearly described a minimum pause, not a new interval.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(1000L);
        // Checkpoint timeout (roughly half of one checkpoint period).
        env.getCheckpointConfig().setCheckpointTimeout(15 * 1000);
        // At most one checkpoint in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Keep the externalized checkpoint when the job is cancelled.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // A failed checkpoint does not fail the job.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        // RocksDB state backend; checkpoint data stored on HDFS under the task name.
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        env.getCheckpointConfig().setCheckpointStorage(ConfigLoader.get("hdfsUri") + "/flink-checkpoints/" + taskName);

        // Fixed-delay restart: 3 attempts, 10 s apart. With checkpointing enabled and no
        // explicit strategy, Flink would otherwise restart indefinitely.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
                3,
                Time.seconds(10)
        ));

        return env;
    }

    /**
     * Applies the standard checkpoint / restart configuration used by newer jobs:
     * 5-minute exactly-once checkpoints on an {@code FsStateBackend}, unaligned
     * checkpoints, and a failure-rate restart strategy.
     *
     * @param env     the environment to configure (mutated in place)
     * @param jobName appended to {@code FaultToleranceConstant.CHECKPOINT_URL} to form the checkpoint path
     */
    public static void envSet(StreamExecutionEnvironment env, String jobName) {
        // Checkpoint every 5 minutes.
        env.enableCheckpointing(5 * 60 * 1000L);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Exactly-once semantics.
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // At least 1 minute between the end of one checkpoint and the start of the next.
        checkpointConfig.setMinPauseBetweenCheckpoints(1 * 60 * 1000L);
        // Checkpoint timeout: 8 minutes. (The original comment said 3 minutes; the code sets 8.)
        checkpointConfig.setCheckpointTimeout(8 * 60 * 1000L);
        // A checkpoint error does not fail the job; the checkpoint is dropped and the job keeps running.
        checkpointConfig.setFailOnCheckpointingErrors(false);
        // Tolerate up to 2 checkpoint failures before the job restarts.
        checkpointConfig.setTolerableCheckpointFailureNumber(2);
        // Unaligned checkpoints: faster checkpointing under backpressure.
        checkpointConfig.enableUnalignedCheckpoints();
        // FsStateBackend: working state on the JVM heap, checkpoints on the file system.
        env.setStateBackend(new FsStateBackend(FaultToleranceConstant.CHECKPOINT_URL + jobName));
        // Keep the externalized checkpoint when the job is cancelled.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Failure-rate restart: restart while at most 3 failures occur within a 10-minute
        // window, waiting 1 minute between restart attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.minutes(10), Time.minutes(1)));

    }

    /**
     * Builds a Kafka consumer for the configured topic and adds it to the environment.
     * <p>
     * The consumer starts from the latest offsets, commits offsets on checkpoints,
     * and has partition discovery enabled.
     *
     * @param env     the streaming environment the source is added to
     * @param groupId Kafka consumer group id
     * @param clazz   deserialization schema class; must have a public no-arg constructor
     * @param <T>     element type produced by the deserialization schema
     * @return the data stream backed by the Kafka source
     * @throws IllegalStateException if the deserialization schema cannot be instantiated
     */
    protected static <T> DataStreamSource<T> getKafkaStream(StreamExecutionEnvironment env,
                                                            String groupId,
                                                            Class<? extends DeserializationSchema> clazz
    ) {
        // Connection parameters: brokers, group id, and partition discovery interval.
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigLoader.get("bootstrap.servers"));
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.setProperty(FlinkKafkaConsumerBase.KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS,
                ConfigLoader.get("flink.partition-discovery.interval-millis"));

        FlinkKafkaConsumer<T> consumer;
        try {
            consumer = new FlinkKafkaConsumer<T>(
                    // NOTE(review): the topic is hard-coded to "kafka.topic.es11" even though
                    // this helper is generic — confirm this is intended for all callers.
                    ConfigLoader.get("kafka.topic.es11"),
                    clazz.newInstance(),
                    props
            );
            // Consume from the latest offsets (use setStartFromEarliest() when testing).
            consumer.setStartFromLatest();
        } catch (InstantiationException | IllegalAccessException e) {
            // Fail fast with the cause preserved. The original swallowed the exception with
            // printStackTrace() and then dereferenced the null consumer, producing an NPE.
            throw new IllegalStateException("Cannot instantiate deserialization schema: " + clazz, e);
        }
        // Commit the Kafka offsets together with each successful checkpoint.
        consumer.setCommitOffsetsOnCheckpoints(true);

        return env.addSource(consumer);
    }


    /**
     * Builds a row-format HDFS sink under the Hive warehouse, bucketed by date.
     *
     * @param prefix               part-file prefix (table name)
     * @param suffix               part-file suffix/extension
     * @param db                   database directory, e.g. {@code vaas_dwd.db}
     * @param path                 table path below the database directory
     * @param bucketAssignerFormat date format used for bucket names, e.g. {@code yyyyMMdd}
     * @return the configured streaming file sink
     */
    protected static StreamingFileSink<String> getFileSink(String prefix,
                                                           String suffix,
                                                           String db,
                                                           String path,
                                                           String bucketAssignerFormat) {
        // Part-file naming: <prefix>-...-<suffix>.
        OutputFileConfig fileNaming = OutputFileConfig.builder()
                .withPartPrefix(prefix)
                .withPartSuffix(suffix)
                .build();

        // Output location inside the Hive warehouse.
        Path basePath = new Path(ConfigLoader.get("hdfsUri") + "/user/hive/warehouse/" + db + "/" + path);

        // Roll every 30 minutes, after 10 minutes of inactivity, or at 128 MB.
        DefaultRollingPolicy<String, String> rollingPolicy = DefaultRollingPolicy.builder()
                .withRolloverInterval(TimeUnit.MINUTES.toMillis(30))
                .withInactivityInterval(TimeUnit.MINUTES.toMillis(10))
                .withMaxPartSize(128 * 1024 * 1024)
                .build();

        return StreamingFileSink
                .forRowFormat(basePath, new SimpleStringEncoder<String>("utf-8"))
                // One bucket per time unit of the supplied date format.
                .withBucketAssigner(new DateTimeBucketAssigner<>(bucketAssignerFormat))
                .withRollingPolicy(rollingPolicy)
                .withOutputFileConfig(fileNaming)
                .build();
    }

    /**
     * Builds a row-format HDFS sink under {@code /external/data/} using a single
     * global bucket (all part files in the base path).
     *
     * @param prefix part-file prefix (table name)
     * @param suffix part-file suffix/extension
     * @param path   output path below {@code /external/data/}
     * @return the configured streaming file sink
     */
    public static StreamingFileSink<String> hdfsSink(String prefix,
                                                     String suffix,
                                                     String path) {
        // Delegate to the parameterized overload instead of duplicating its builder chain.
        // The historical defaults are kept: roll every 3 minutes, roll after 1 minute of
        // inactivity, roll at 128 MB. (The original inline comments claimed 30 s / 10 s /
        // 64 MB, which did not match the code.)
        return hdfsSink(prefix, suffix, path,
                TimeUnit.MINUTES.toMillis(3),
                TimeUnit.MINUTES.toMillis(1),
                128 * 1024 * 1024);
    }

    /**
     * Builds a row-format HDFS sink under {@code /external/data/} using a single
     * global bucket, with caller-supplied rolling thresholds.
     *
     * @param prefix            part-file prefix (table name)
     * @param suffix            part-file suffix/extension
     * @param path              output path below {@code /external/data/}
     * @param rolloverInterval  max time (ms) a part file stays open before rolling
     * @param inactivityInterval max time (ms) without writes before rolling
     * @param maxPartSize       max part-file size in bytes before rolling
     * @return the configured streaming file sink
     */
    public static StreamingFileSink<String> hdfsSink(String prefix,
                                                     String suffix,
                                                     String path, long rolloverInterval, long inactivityInterval, long maxPartSize) {
        // Part-file naming: <prefix>-...-<suffix>.
        OutputFileConfig outputFileConfig = OutputFileConfig.builder()
                .withPartPrefix(prefix)
                .withPartSuffix(suffix)
                .build();

        StreamingFileSink<String> streamingFileSink = StreamingFileSink.forRowFormat(
                        new Path(ConfigLoader.get("hdfsUri") + "/external/data/" + path),
                        new SimpleStringEncoder<String>("utf-8")
                        // Bucket assigner:
                ).withBucketAssigner(
                        // All part files go into the base path (single global bucket).
                        new BasePathBucketAssigner<>()
                        // Rolling policy (all three thresholds caller-supplied):
                ).withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                // Roll when the file has been open for rolloverInterval ms.
                                .withRolloverInterval(rolloverInterval)
                                // Roll after inactivityInterval ms without writes.
                                .withInactivityInterval(inactivityInterval)
                                // Roll when the part file reaches maxPartSize bytes.
                                .withMaxPartSize(maxPartSize)
                                .build()
                )
                .withOutputFileConfig(outputFileConfig)
                .build();
        return streamingFileSink;
    }


    /**
     * Convenience overload: sinks {@code dataStream} to {@code topic} with default
     * partitioning, default producer properties, and parallelism 1.
     */
    static void defaultSinkToKafka(DataStream dataStream, String topic) {
        defaultSinkToKafka(dataStream, topic, null, null, null);
    }


    /**
     * Pushes a Flink stream to Kafka, wrapping each record in a {@code GwmKafkaJson}
     * envelope ({@code timestamp}/{@code type}/{@code sourceBody}).
     *
     * @param dataStream   stream of beans; each element is converted to a map via BeanUtil
     * @param topic        destination Kafka topic
     * @param partitionKey optional key for partition routing; blank/null means default partitioning
     * @param properties   optional producer properties overriding the defaults below
     * @param parallelism  optional sink parallelism; null means 1
     */
    static void defaultSinkToKafka(DataStream dataStream, String topic, String partitionKey, Properties properties, Integer parallelism) {
        // Defaults lean towards durability: acks=all, 3 retries, idempotence,
        // one in-flight request, 10-minute transaction timeout.
        Properties prodProps = new Properties();
        prodProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigLoader.get("bootstrap.servers"));
        prodProps.setProperty(ProducerConfig.ACKS_CONFIG, "all");
        prodProps.setProperty(ProducerConfig.RETRIES_CONFIG, String.valueOf(3));
        prodProps.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(10 * 60 * 1000L));
        prodProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        prodProps.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");

        // Caller-supplied properties override the defaults above.
        if (properties != null) {
            for (String key : properties.stringPropertyNames()) {
                prodProps.setProperty(key, properties.getProperty(key));
            }
        }

        dataStream
                .map((MapFunction) value -> {
                    Map<String, Object> objectMap = BeanUtil.beanToMap(value);
                    return GwmKafkaJson.builder()
                            .timestamp(System.currentTimeMillis())
                            .type("json")
                            .sourceBody(objectMap)
                            .build();
                })
                // NOTE(review): the map emits GwmKafkaJson but the declared type here is
                // JSONObject — presumably the downstream serializer only needs a generic
                // type hint; confirm against KafkaSinkProducer.
                .returns(Types.GENERIC(JSONObject.class))
                .addSink(
                        StringUtils.isNotBlank(partitionKey) ? KafkaSinkProducer.producerBuilder(topic, prodProps, partitionKey) : KafkaSinkProducer.producerBuilder(topic, prodProps)
                )
                .setParallelism(parallelism == null ? 1 : parallelism)
                .name("addSink to kafka " + topic);
    }

    /**
     * Pushes a Flink stream to Kafka, wrapping each record under a {@code "source"} key
     * inside a {@code GwmKafkaJson} envelope.
     *
     * @param dataStream         input stream; each element becomes the "source" field
     * @param topic              destination Kafka topic
     * @param transactionTimeout producer transaction timeout in milliseconds (string form)
     * @param parallelism        parallelism of both the map and the sink operator
     */
    static void defaultSinkToKafka(DataStream dataStream, String topic, String transactionTimeout, int parallelism) {
        // BUG FIX: removed the leftover dataStream.print() debug sink, which printed every
        // record to stdout in production and added an extra operator to the job graph.
        Properties prodProps = new Properties();
        prodProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigLoader.get("bootstrap.servers"));
        prodProps.setProperty(ProducerConfig.ACKS_CONFIG, "all");
        // Linger 10 ms before sending (default is 5 ms) to improve batching.
        prodProps.setProperty(ProducerConfig.LINGER_MS_CONFIG, "10");
        // Batch size 8 KB (default is 16 KB = 16384).
        prodProps.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "8192");
        prodProps.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, transactionTimeout);
        prodProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        prodProps.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
        dataStream
                .map((MapFunction) value -> {
                    JSONObject jsonObject = new JSONObject();
                    jsonObject.put("source", value);
                    return GwmKafkaJson.builder()
                            .timestamp(System.currentTimeMillis())
                            .type("json")
                            .body(jsonObject)
                            .build();
                }).setParallelism(parallelism)
                // NOTE(review): GENERIC(JSONObject) while the map emits GwmKafkaJson —
                // presumably only a generic serialization hint; confirm.
                .returns(Types.GENERIC(JSONObject.class))
                .addSink(KafkaSinkProducer.producerBuilder(topic, prodProps)).setParallelism(parallelism).uid("sink-kafka-uid").name("addSink kafka");
    }

    /**
     * Converts a stream of fastjson records into {@link SourceHbaseBean}s ready for an
     * HBase sink. The row key is {@code deviceId + reverse(item_time)} when a deviceId
     * is present, otherwise the configured default row key.
     *
     * @param jsonObjectDataStream input stream of fastjson objects
     * @param hbaseTableName       target HBase table name
     * @param parallelism          parallelism of the map operator
     * @return stream of HBase write beans
     */
    static DataStream<SourceHbaseBean> convertStream(DataStream<com.alibaba.fastjson.JSONObject> jsonObjectDataStream, String hbaseTableName, int parallelism) {
        return jsonObjectDataStream.map(new MapFunction<com.alibaba.fastjson.JSONObject, SourceHbaseBean>() {
            @Override
            public SourceHbaseBean map(com.alibaba.fastjson.JSONObject record) throws Exception {
                String deviceId = record.getString("deviceId");
                // deviceId + reversed item_time spreads writes across regions;
                // fall back to the configured default when deviceId is missing.
                String rowKey = StringUtils.isNotBlank(deviceId)
                        ? deviceId + StringUtil.reverse(record.getString("item_time"))
                        : ConfigLoader.get("hbase.table.rowkey.default");
                return new SourceHbaseBean()
                        .setRowKey(rowKey)
                        .setTableName(hbaseTableName)
                        .setColumnFamily(ConfigLoader.get("hbase.table.columnfamily"))
                        .setDatamap(record);
            }
        }).setParallelism(parallelism);
    }

    /**
     * Builds a string-deserializing Kafka consumer for the given topics, starting from
     * the committed group offsets and committing offsets on Flink checkpoints.
     *
     * @param groupId consumer group id
     * @param topics  one or more topics to subscribe to
     * @return the configured consumer instance
     */
    protected static FlinkKafkaConsumer decorateKafkaConsumer(String groupId, String... topics) {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigLoader.get("bootstrap.servers"));
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // NOTE(review): with checkpointing enabled, Flink commits offsets on checkpoints
        // (setCommitOffsetsOnCheckpoints below) and Kafka auto-commit is ignored; this
        // property only matters for checkpoint-less runs — confirm it is intended.
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10");
        // Arrays.asList replaces the needless Arrays.stream(...).collect(toList()) round trip.
        FlinkKafkaConsumer<String> kafkaConsumer =
                new FlinkKafkaConsumer<>(Arrays.asList(topics), new SimpleStringSchema(), props);
        kafkaConsumer.setCommitOffsetsOnCheckpoints(true);
        kafkaConsumer.setStartFromGroupOffsets();
        return kafkaConsumer;
    }

    /**
     * Row-format sink writing Hive-style text files partitioned by the record's event
     * date, derived from the epoch-millis field {@code BaseFields.tid}.
     * <p>
     * Note from the original author: if {@code datedt} is not the last field in the
     * source properties file, the Hive DDL must add the field under another name,
     * otherwise the column order will not match.
     *
     * @param prefix     part-file prefix (table name)
     * @param suffix     part-file suffix/extension
     * @param path       output path below {@code /external/data/}
     * @param sourceEnum column layout used by {@link MySimpleStringEncoder}
     * @return the configured streaming file sink
     */
    public static StreamingFileSink<JSONObject> getFileSink(String prefix,
                                                            String suffix,
                                                            String path,
                                                            BeanSource.SourceEnum sourceEnum) {
        OutputFileConfig outputFileConfig = OutputFileConfig.builder()
                .withPartPrefix(prefix)
                .withPartSuffix(suffix)
                .build();

        final StreamingFileSink<JSONObject> streamingFileSink = StreamingFileSink.<JSONObject>forRowFormat(
                        new Path(ConfigLoader.get("hdfsUri") + "/external/data/" + path),
                        new MySimpleStringEncoder<JSONObject>(sourceEnum)
                        // Bucket assigner: one bucket per event day, "<PARTITION_KEY>=yyyy-MM-dd".
                ).withBucketAssigner(
                        new BucketAssigner<JSONObject, String>() {
                            @Override
                            public String getBucketId(JSONObject e, Context context) {
                                return BaseFields.PARTITION_KEY + "=" + DateUtil.date(e.getLong(BaseFields.tid)).toString(DatePattern.NORM_DATE_PATTERN);
                            }

                            @Override
                            public SimpleVersionedSerializer<String> getSerializer() {
                                // Parameterized return type for consistency with the other
                                // bucket assigners in this class (was a raw type).
                                return SimpleVersionedStringSerializer.INSTANCE;
                            }
                        }
                        // Rolling policy: every 10 minutes, after 1 minute idle, or at 128 MB.
                ).withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                .withRolloverInterval(TimeUnit.MINUTES.toMillis(10))
                                .withInactivityInterval(TimeUnit.MINUTES.toMillis(1))
                                .withMaxPartSize(128 * 1024 * 1024)
                                .build()
                )
                .withOutputFileConfig(outputFileConfig)
                .build();

        return streamingFileSink;

    }


    /**
     * Encoder that renders each element as a Hive-compatible text row (via
     * {@link HiveStringUtil#toHiveString}) followed by a newline, UTF-8 encoded.
     *
     * @param <IN> declared element type; must actually be a {@link JSONObject} at runtime
     *             (the encode method casts)
     */
    public static class MySimpleStringEncoder<IN> implements Encoder<IN> {
        private static final long serialVersionUID = -2225105881165804472L;
        // Charset is not Serializable; re-created lazily after task deserialization.
        private transient Charset charset;
        // Controls the column layout produced by HiveStringUtil.toHiveString.
        private final BeanSource.SourceEnum sourceEnum;

        public MySimpleStringEncoder(BeanSource.SourceEnum sourceEnum) {
            this.sourceEnum = sourceEnum;
        }

        @Override
        public void encode(IN element, OutputStream stream) throws IOException {
            if (this.charset == null) {
                this.charset = Charset.forName("UTF-8");
            }
            stream.write(HiveStringUtil.toHiveString((JSONObject) element, sourceEnum).getBytes(this.charset));
            // Row terminator; '\n' replaces the magic constant 10 (same byte value).
            stream.write('\n');
        }
    }

    /**
     * Convenience overload writing under the default {@code odsOrc} output path.
     */
    public static StreamingFileSink<OdsSignalSource> getOdsSignalSourceFileSink(String prefix) {
        return getOdsSignalSourceFileSink(prefix, "odsOrc");
    }

    /**
     * Bulk-format ORC sink for {@link OdsSignalSource} records, bucketed by the
     * record's {@code itemDate} and rolled on every checkpoint.
     *
     * @param prefix part-file prefix (table name), also the last path segment
     * @param path   output path below {@code /external/data/}
     * @return the configured streaming file sink
     */
    public static StreamingFileSink<OdsSignalSource> getOdsSignalSourceFileSink(String prefix, String path) {
        // Fixed ORC schema of the ODS signal-source table.
        String schema = "struct<deviceId:string,itemTime:string,itemDate:string,ingestionTime:string,ingestionDate:string,source:string,bizType:string,content:string>";

        // ORC writer tuning: SNAPPY compression, 5 MB compression chunk,
        // 64 MB stripes, 128 MB blocks, row index every 10000 rows.
        Properties orcProps = new Properties();
        orcProps.setProperty("orc.compress", "SNAPPY");
        orcProps.setProperty("orc.compress.size", "5242880");
        orcProps.setProperty("orc.stripe.size", "67108864");
        orcProps.setProperty("orc.block.size", "134217728");
        orcProps.setProperty("orc.row.index.stride", "10000");

        OrcBulkWriterFactory<OdsSignalSource> factory =
                new OrcBulkWriterFactory<OdsSignalSource>(new OdsSignalSourceVectorizer(schema), orcProps, new Configuration());

        // Part-file naming: <prefix>-... .orc
        OutputFileConfig naming = OutputFileConfig
                .builder()
                .withPartPrefix(prefix)
                .withPartSuffix(".orc")
                .build();

        Path target = new Path(ConfigLoader.get("hdfsUri") + "/external/data/" + path + "/" + prefix);

        // Bucket per item date: "<PARTITION_KEY>=<itemDate>".
        BucketAssigner<OdsSignalSource, String> byItemDate = new BucketAssigner<OdsSignalSource, String>() {
            @Override
            public String getBucketId(OdsSignalSource source, Context context) {
                return BaseFields.PARTITION_KEY + "=" + source.getItemDate();
            }

            @Override
            public SimpleVersionedSerializer<String> getSerializer() {
                return SimpleVersionedStringSerializer.INSTANCE;
            }
        };

        return StreamingFileSink
                .forBulkFormat(target, factory)
                .withBucketAssigner(byItemDate)
                // Bulk formats must roll on checkpoint.
                .withRollingPolicy(OnCheckpointRollingPolicy.build())
                .withOutputFileConfig(naming)
                .build();
    }

    /**
     * ORC sink whose schema is derived from the given semaphore (signal) list.
     */
    public static StreamingFileSink<JSONObject> getOrcTxtSink(String prefix, String path, List<String> semaphoreList) {
        return getOrcTxtSink(prefix, path, null, null, semaphoreList);
    }

    /**
     * ORC sink using the same source enum for both the output and input layout.
     */
    public static StreamingFileSink<JSONObject> getOrcTxtSink(String prefix, String path, BeanSource.SourceEnum sourceEnum) {
        return getOrcTxtSink(prefix, path, sourceEnum, sourceEnum);
    }

    /**
     * SNAPPY-compressed ORC sink exposing all fields.
     *
     * @param prefix      part-file prefix (table name)
     * @param path        output path below {@code /external/data/}
     * @param sourceEnum1 output data-source layout (drives the ORC schema)
     * @param sourceEnum2 input data-source layout
     * @return the configured streaming file sink
     */
    public static StreamingFileSink<JSONObject> getOrcTxtSink(String prefix, String path, BeanSource.SourceEnum sourceEnum1, BeanSource.SourceEnum sourceEnum2) {
        return getOrcTxtSink(prefix, path, sourceEnum1, sourceEnum2, null);
    }

    /**
     * Bulk-format ORC sink for JSON records, bucketed by the record's event day
     * (epoch-millis field {@code BaseFields.tid}) and rolled on every checkpoint.
     * <p>
     * When {@code semaphoreList} is non-null, the ORC schema is derived from that list;
     * otherwise it is derived from {@code sourceEnum1}/{@code sourceEnum2}.
     *
     * @param prefix        part-file prefix (table name), also the last path segment
     * @param path          output path below {@code /external/data/}
     * @param sourceEnum1   output layout (used when semaphoreList is null)
     * @param sourceEnum2   input layout (used when semaphoreList is null)
     * @param semaphoreList optional explicit signal list driving the schema
     * @return the configured streaming file sink
     */
    public static StreamingFileSink<JSONObject> getOrcTxtSink(String prefix, String path, BeanSource.SourceEnum sourceEnum1, BeanSource.SourceEnum sourceEnum2, List<String> semaphoreList) {
        // ORC writer tuning: SNAPPY compression, 5 MB compression chunk,
        // 64 MB stripes, 128 MB blocks, row index every 10000 rows.
        Properties writerProperties = new Properties();
        writerProperties.setProperty("orc.compress", "SNAPPY");
        writerProperties.setProperty("orc.compress.size", "5242880");
        writerProperties.setProperty("orc.stripe.size", "67108864");
        writerProperties.setProperty("orc.block.size", "134217728");
        writerProperties.setProperty("orc.row.index.stride", "10000");
        OrcBulkWriterFactory<JSONObject> writerFactory;
        if (semaphoreList == null) {
            writerFactory = new OrcBulkWriterFactory<JSONObject>(new OdsOrcVectorizer(BeanSource.getSchema(sourceEnum1), sourceEnum2), writerProperties, new Configuration());
        } else {
            // Informational schema logging. FIX: was log.error, which polluted
            // error-level monitoring with routine startup output.
            log.info("schema:{}", BeanSource.getSchema(semaphoreList));
            log.info("semaphoreList:{}", semaphoreList);
            writerFactory = new OrcBulkWriterFactory<JSONObject>(new OdsOrcVectorizer(BeanSource.getSchema(semaphoreList), semaphoreList), writerProperties, new Configuration());
        }
        // Part-file naming: <prefix>-... .orc
        OutputFileConfig config = OutputFileConfig
                .builder()
                .withPartPrefix(prefix)
                .withPartSuffix(".orc")
                .build();
        StreamingFileSink<JSONObject> sink = StreamingFileSink
                .forBulkFormat(new Path(ConfigLoader.get("hdfsUri") + "/external/data/" + path + "/" + prefix), writerFactory)
                // Bucket per event day: "<PARTITION_KEY>=yyyy-MM-dd".
                .withBucketAssigner(new BucketAssigner<JSONObject, String>() {
                    @Override
                    public String getBucketId(JSONObject source, Context context) {
                        return BaseFields.PARTITION_KEY + "=" + DateUtil.format(DateUtil.date(source.getLong(BaseFields.tid)),
                                DatePattern.NORM_DATE_PATTERN);
                    }

                    @Override
                    public SimpleVersionedSerializer<String> getSerializer() {
                        return SimpleVersionedStringSerializer.INSTANCE;
                    }
                })
                // Bulk formats must roll on checkpoint.
                .withRollingPolicy(OnCheckpointRollingPolicy.build())
                // Check buckets for rollable files every 60 s.
                .withBucketCheckInterval(60 * 1000)
                .withOutputFileConfig(config)
                .build();
        return sink;
    }


}
