package cn.gwm.flink.streaming.sink.hdfs;

import cn.gwm.flink.streaming.beans.BeanSource;
import cn.gwm.flink.streaming.constant.BaseFields;
import cn.gwm.flink.streaming.ods.model.OdsOrcVectorizer;
import cn.gwm.flink.streaming.task.BaseTask;
import cn.gwm.utils.ConfigLoader;
import cn.hutool.core.date.DatePattern;
import cn.hutool.core.date.DateUtil;
import cn.hutool.json.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.orc.writer.OrcBulkWriterFactory;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.BasePathBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import org.apache.hadoop.conf.Configuration;

import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Factory methods for building Flink {@link StreamingFileSink} instances that
 * write streaming data to HDFS, either as plain-text row-format files or as
 * SNAPPY-compressed ORC bulk-format files.
 *
 * @author LiangGuang
 * @since 2023/06/06
 */
@Slf4j
public final class HdfsUtil {

    /** Root directory (appended to the configured HDFS URI) under which all sinks write. */
    private static final String DATA_ROOT = "/external/data/";

    private HdfsUtil() {
        // Utility class — static methods only, no instances.
    }

    /**
     * Resolves the absolute HDFS output path for the given relative path.
     * Reads the {@code hdfsUri} setting at call time, matching the original behavior.
     *
     * @param path output path relative to the external data root
     * @return absolute HDFS {@link Path} for sink output
     */
    private static Path dataPath(String path) {
        return new Path(ConfigLoader.get("hdfsUri") + DATA_ROOT + path);
    }

    /**
     * Builds a row-format UTF-8 text sink that stores all part files directly in
     * the target directory (single global bucket, no partition subdirectories).
     *
     * @param prefix part-file name prefix, conventionally the table name
     * @param suffix part-file name suffix (file extension)
     * @param path   output path relative to the external data root
     * @return a configured {@link StreamingFileSink} for text rows
     */
    public static StreamingFileSink<String> hdfsTxtSink(String prefix,
                                                        String suffix,
                                                        String path) {
        // Part-file naming: <prefix>-<subtask>-<counter><suffix>
        OutputFileConfig outputFileConfig = OutputFileConfig.builder()
                .withPartPrefix(prefix)
                .withPartSuffix(suffix)
                .build();

        return StreamingFileSink.forRowFormat(
                        dataPath(path),
                        new SimpleStringEncoder<String>("utf-8")
                ).withBucketAssigner(
                        // Single global bucket: every part file goes straight into the base path.
                        new BasePathBucketAssigner<>()
                ).withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                // Roll an open part file after 3 minutes, regardless of activity.
                                .withRolloverInterval(TimeUnit.MINUTES.toMillis(3))
                                // Roll after 1 minute with no new records written.
                                .withInactivityInterval(TimeUnit.MINUTES.toMillis(1))
                                // Roll once a part file reaches 128 MB.
                                .withMaxPartSize(128 * 1024 * 1024)
                                .build()
                )
                .withOutputFileConfig(outputFileConfig)
                .build();
    }

    /**
     * Builds a row-format JSON sink bucketed by day: each record's {@code tid}
     * epoch-millis timestamp is formatted as {@code yyyy-MM-dd} and used as the
     * partition directory ({@code <PARTITION_KEY>=<date>}).
     *
     * <p>NOTE(review): per the original (Chinese) comment, the field order comes
     * from the source's properties file; if the date field is not the last field,
     * the create-table statement must list it under another name, otherwise the
     * column order will not match — confirm against the table DDL.
     *
     * @param prefix     part-file name prefix
     * @param suffix     part-file name suffix (file extension)
     * @param path       output path relative to the external data root
     * @param sourceEnum source descriptor consumed by the row encoder
     * @return a configured {@link StreamingFileSink} for JSON rows
     */
    public static StreamingFileSink<JSONObject> hdfsTxtSink(String prefix,
                                                            String suffix,
                                                            String path,
                                                            BeanSource.SourceEnum sourceEnum) {
        OutputFileConfig outputFileConfig = OutputFileConfig.builder()
                .withPartPrefix(prefix)
                .withPartSuffix(suffix)
                .build();

        return StreamingFileSink.<JSONObject>forRowFormat(
                        dataPath(path),
                        new BaseTask.MySimpleStringEncoder<JSONObject>(sourceEnum)
                ).withBucketAssigner(
                        new BucketAssigner<JSONObject, String>() {
                            @Override
                            public String getBucketId(JSONObject e, Context context) {
                                // Day-partition directory derived from the record's tid timestamp,
                                // e.g. dt=2023-06-06.
                                return BaseFields.PARTITION_KEY + "="
                                        + DateUtil.date(e.getLong(BaseFields.tid)).toString(DatePattern.NORM_DATE_PATTERN);
                            }

                            @Override
                            public SimpleVersionedSerializer<String> getSerializer() {
                                return SimpleVersionedStringSerializer.INSTANCE;
                            }
                        }
                ).withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                // Roll an open part file after 10 minutes, regardless of activity.
                                .withRolloverInterval(TimeUnit.MINUTES.toMillis(10))
                                // Roll after 1 minute with no new records written.
                                .withInactivityInterval(TimeUnit.MINUTES.toMillis(1))
                                // Roll once a part file reaches 128 MB.
                                .withMaxPartSize(128 * 1024 * 1024)
                                .build()
                )
                .withOutputFileConfig(outputFileConfig)
                .build();
    }

    /**
     * Builds a bulk-format ORC sink (SNAPPY compression) that buckets each record
     * by the partition-key value already present on the record, and rolls files on
     * every checkpoint (bulk formats only support {@link OnCheckpointRollingPolicy}).
     *
     * @param prefix     part-file name prefix; files always get a {@code .orc} suffix
     * @param path       output path relative to the external data root
     * @param sourceEnum source descriptor providing the ORC schema and vectorizer
     * @return a configured {@link StreamingFileSink} writing ORC files
     */
    public static StreamingFileSink<JSONObject> hdfsOrcSink(String prefix, String path, BeanSource.SourceEnum sourceEnum) {
        Properties writerProperties = new Properties();
        writerProperties.setProperty("orc.compress", "SNAPPY");
        // 5 MB compression chunk size
        writerProperties.setProperty("orc.compress.size", "5242880");
        // 64 MB stripe size
        writerProperties.setProperty("orc.stripe.size", "67108864");
        // 128 MB HDFS block size
        writerProperties.setProperty("orc.block.size", "134217728");
        writerProperties.setProperty("orc.row.index.stride", "10000");
        OrcBulkWriterFactory<JSONObject> writerFactory = new OrcBulkWriterFactory<>(
                new OdsOrcVectorizer(BeanSource.getSchema(sourceEnum), sourceEnum),
                writerProperties,
                new Configuration());

        // Part-file naming: <prefix>-<subtask>-<counter>.orc
        OutputFileConfig config = OutputFileConfig
                .builder()
                .withPartPrefix(prefix)
                .withPartSuffix(".orc")
                .build();

        return StreamingFileSink
                .forBulkFormat(dataPath(path), writerFactory)
                .withBucketAssigner(new BucketAssigner<JSONObject, String>() {
                    @Override
                    public String getBucketId(JSONObject source, Context context) {
                        // Partition directory taken straight from the record's partition-key field,
                        // e.g. dt=2023-06-06.
                        return BaseFields.PARTITION_KEY + "=" + source.getStr(BaseFields.PARTITION_KEY);
                    }

                    @Override
                    public SimpleVersionedSerializer<String> getSerializer() {
                        return SimpleVersionedStringSerializer.INSTANCE;
                    }
                })
                // Bulk formats must roll on checkpoint to preserve exactly-once guarantees.
                .withRollingPolicy(OnCheckpointRollingPolicy.build())
                .withBucketCheckInterval(60 * 1000)
                .withOutputFileConfig(config)
                .build();
    }

}
