package streamLogStaticJava.handle;


import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import streamLogStaticJava.pojos.LogbeanSchema;
import streamLogStaticJava.pojos.logbean;

import javax.annotation.Nullable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.ZoneId;
import java.util.Properties;

/**
 * @ClassName kafkatoFileHandle
 * @Description Reads click events from Kafka and writes them to HDFS as row-format
 *              and Parquet files via {@link StreamingFileSink}.
 * @Author dalong
 * @Date 2020/6/22 21:17
 * @Version 1.0
 *
 * NOTE: When this job is force-killed during local testing, the part file being
 * written is left unclosed — it stays in "in-progress" state and keeps its write
 * lock. When the job is stopped on a cluster, files are closed normally.
 *     Open question: what cleanup steps does the cluster perform before a task
 *     is stopped?
 * Use `hdfs fsck /path/` to inspect file status.
 **/

public class kafkatoFileHandle {

    public static void main(String[] args) throws Exception {

        String savepath="hdfs://localhost:9000/Users/dalong/Downloads/temp/weblogaa";

        StreamExecutionEnvironment streamenv = StreamExecutionEnvironment.getExecutionEnvironment();
        //远程比较任务到集群
        // StreamExecutionEnvironment streamenv = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123);

        streamenv.setRestartStrategy(RestartStrategies.fallBackRestart());
        streamenv.enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
        streamenv.setStateBackend(new FsStateBackend("hdfs://localhost:9000/flink/checkpoint/"));

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers","localhost:9092");
        properties.setProperty("group.id","weblog");
        properties.setProperty("key.serializer","org.apache.kafka.common.serialization.StringSerializer");
        properties.setProperty("value.serializer","org.apache.kafka.common.serialization.StringSerializer");
        FlinkKafkaConsumer010<logbean> logbeanFlinkKafkaConsumer010 = new FlinkKafkaConsumer010<logbean>("click_events",new LogbeanSchema(),properties);
        logbeanFlinkKafkaConsumer010.setStartFromEarliest();
        DataStreamSource<logbean> datastream = streamenv.addSource(logbeanFlinkKafkaConsumer010);

        //StreamTableEnvironment stblenv = StreamTableEnvironment.create(streamenv);

        SingleOutputStreamOperator<logbean> datastreamw = datastream.assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<logbean>() {

            long nowtimestamp = 0;
            long interval = 100;

            @Nullable
            @Override
            public Watermark getCurrentWatermark() {
                return new Watermark(nowtimestamp - interval);
            }

            @Override
            public long extractTimestamp(logbean element, long previousElementTimestamp) {
                nowtimestamp = Double.doubleToLongBits(element.getClick_ts() * 1000);
                return nowtimestamp;
            }
        });


        checkAndDelDir(savepath);
        checkAndDelDir(savepath+"/parquet");

        SingleOutputStreamOperator<logbean> map = datastreamw.map((MapFunction<logbean, logbean>) value -> value).uid("map");
        //将文件写入file
        datastreamw.addSink(getRowStreamingFileSink(savepath));

        datastreamw.addSink(new PrintSinkFunction<>());
        datastreamw.addSink(getParquetStreamingFileSink(savepath+"/parquet"));
        /*Table kafkatbl = stblenv.fromDataStream(datastreamw);

        stblenv.registerTable("kafkatbl",kafkatbl);


        Table restbl = stblenv.sqlQuery("select visitURL,userName,click_ts from kafkatbl");


        DataStream<logbean> logbeanDataStream = stblenv.toAppendStream(restbl, logbean.class);
        logbeanDataStream.print();

        checkAndDelDir(savepath);

        //restbl.writeToSink();

        FileSystem fileSystem = new FileSystem();
        fileSystem.path(savepath);

        Schema schema = new Schema();
        schema.field("visitURL", DataTypes.STRING())
        .field("userName",DataTypes.STRING())
        .field("click_ts",DataTypes.DOUBLE())
        ;


        stblenv.connect(fileSystem)
                .withFormat(new Csv().fieldDelimiter(','))
                .withSchema(schema)
                .createTemporaryTable("csvSinktable")
        ;
        restbl.insertInto("csvSinktable");
        //功能等同insertinto
        //stblenv.sqlUpdate("insert into csvSinktable select visitURL,userName,click_ts from kafkatbl");
*/
        streamenv.execute("kafkatofile");

    }

    private static void checkAndDelDir(String dir){

        Path path = new Path(dir);
        Configuration conf = new Configuration();

        try (org.apache.hadoop.fs.FileSystem hfs = org.apache.hadoop.fs.FileSystem.get(path.toUri(), conf)) {
            if(hfs.exists(path)){
                hfs.delete(path,true);
                System.out.println("------------delete file success");
            }
        }catch (Exception e){

        }
    }

    private static StreamingFileSink<logbean> getRowStreamingFileSink(String path){
        //文件滚动策略
        DefaultRollingPolicy<logbean,String> rollingPolicy = DefaultRollingPolicy.builder()
                //60s空闲就滚动写入新文件
                .withInactivityInterval(60 * 1000)
                //设置每个文件的最大大小 128MB
                .withMaxPartSize(1024L * 1024L * 128L)
                //滚动写入新文件的时间 默认60s
                .withInactivityInterval(60 * 1000)
                .build();
        //文件分桶策略


        StreamingFileSink<logbean> filesink = StreamingFileSink.forRowFormat(new org.apache.flink.core.fs.Path(path), new SimpleStringEncoder<logbean>("UTF-8"))
                //指定文件滚动写入策略
                .withRollingPolicy(rollingPolicy)
                //指定文件分捅策略
                .withBucketAssigner(new BucketAssigner<logbean, String>() {
                    private static final long serialVersionUID = 1L;
                    //这个是默认的分桶路径格式，默认根据`dateTimeFormatter.format(Instant.ofEpochMilli(context.currentProcessingTime()))`也就是处理时			 间来粪桶, 比如/xxx/2020-01-01--00,这样的路径
                    private static final String DEFAULT_FORMAT_STRING = "yyyy-MM-dd--HH";

                    @Override
                    public String getBucketId(logbean element, Context context) {
                        return (int)(Math.random()*1000)%3+"";
                    }

                    @Override
                    public SimpleVersionedSerializer<String> getSerializer() {
                        return new SimpleVersionedSerializer<String>() {
                            @Override
                            public int getVersion() {
                                return (int)serialVersionUID;
                            }

                            @Override
                            public byte[] serialize(String s) throws IOException {
                                return s.getBytes();
                            }

                            @Override
                            public String deserialize(int i, byte[] bytes) throws IOException {
                                return new String(bytes);
                            }
                        };
                    }
                })
                .build();

        return filesink;
    }

    public static StreamingFileSink<logbean> getParquetStreamingFileSink(String path){

        StreamingFileSink<logbean> sink = StreamingFileSink.forBulkFormat(new org.apache.flink.core.fs.Path(path), ParquetAvroWriters.forReflectRecord(logbean.class))
                .withBucketCheckInterval(1000*10)
                //.withRollingPolicy()
                .withNewBucketAssigner(new DateTimeBucketAssigner("yyyyMMdd", ZoneId.of("Asia/Shanghai")))
//                .withOutputFileConfig()
                .build();
        return sink;
    }
}
