package com.zallds.flink.stream;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.parser.Feature;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Created by liujian on 2020/4/10.
 */
public class Canal2HDFS {

    /** Column separator used when flattening a Canal row into CSV text. */
    private static final String FIELD_DELIMITER = ",";
    /** Row separator between multiple rows carried in one Canal message. */
    private static final String LINE_DELIMITER = "\n";

    /**
     * Flink job: consume Canal binlog JSON from the "canal" Kafka topic, keep DML
     * events of the "test" database, flatten each changed row to a CSV line
     * ({@code table,<col1>,...,ts}) and write it to HDFS, bucketed by table name.
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // StreamingFileSink only moves part files from in-progress/pending to
        // finished when a checkpoint completes; without checkpointing the HDFS
        // output would never be finalized.
        env.enableCheckpointing(TimeUnit.MINUTES.toMillis(1));

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "tv-zds-tmc-004:6667,tv-zds-tmc-005:6667");
        props.setProperty("group.id", "test123");
        props.setProperty("enable.auto.commit", "true");
        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>("canal", new SimpleStringSchema(), props);
        consumer.setStartFromLatest();

        DataStream<String> stream = env.addSource(consumer);

        // transform: drop DDL events / foreign databases, then flatten rows to CSV
        SingleOutputStreamOperator<String> dataDs = stream.filter(new FilterFunction<String>() {
            // Keep only DML events from the "test" database. Constant-first
            // equals() avoids an NPE when a field is absent from the message.
            @Override
            public boolean filter(String jsonData) throws Exception {
                JSONObject record = JSON.parseObject(jsonData, Feature.OrderedField);
                return "false".equals(record.getString("isDdl"))
                        && "test".equals(record.getString("database"));
            }
        }).map(new MapFunction<String, String>() {
            // Flatten every row in the "data" array to: table,<col1>,...,ts
            // Rows are separated by '\n' so a multi-row Canal message does not
            // collapse into one merged line.
            @Override
            public String map(String value) throws Exception {
                StringBuilder fieldsBuilder = new StringBuilder();
                JSONObject record = JSON.parseObject(value, Feature.OrderedField);
                String ts = record.getString("ts");
                String table = record.getString("table");
                JSONArray data = record.getJSONArray("data");
                if (data == null) {
                    // Some event types (e.g. TRUNCATE) carry no data array;
                    // emit an empty line rather than throwing NPE.
                    return "";
                }
                for (int i = 0; i < data.size(); i++) {
                    if (i > 0) {
                        fieldsBuilder.append(LINE_DELIMITER);
                    }
                    JSONObject obj = data.getJSONObject(i);
                    fieldsBuilder.append(table);
                    if (obj != null) {
                        // OrderedField parsing keeps columns in binlog order.
                        for (Map.Entry<String, Object> entry : obj.entrySet()) {
                            fieldsBuilder.append(FIELD_DELIMITER);
                            fieldsBuilder.append(entry.getValue());
                        }
                    }
                    fieldsBuilder.append(FIELD_DELIMITER);
                    fieldsBuilder.append(ts);
                }
                return fieldsBuilder.toString();
            }
        });

        dataDs.print();

        // Row-format sink; rows are bucketed by table name so each table lands
        // in its own directory under hdfs:///test/flinkOut.
        StreamingFileSink<String> sink = StreamingFileSink
                .forRowFormat(new Path("hdfs:///test/flinkOut"),
                        new SimpleStringEncoder<String>("UTF-8"))
                .withOutputFileConfig(OutputFileConfig.builder().withPartPrefix("0000").build())
                .withBucketAssigner(new TableBucketAssigner())
                .build();
        dataDs.addSink(sink);

        env.execute("Canal2HDFS");
    }

    /**
     * Routes each CSV line to a bucket named after its table — the first
     * comma-separated field emitted by the map function above.
     */
    private static class TableBucketAssigner implements BucketAssigner<String, String> {

        @Override
        public String getBucketId(String element, Context context) {
            // limit 2: only the first delimiter matters, avoid splitting the whole row.
            return element.split(",", 2)[0];
        }

        @Override
        public SimpleVersionedSerializer<String> getSerializer() {
            return SimpleVersionedStringSerializer.INSTANCE;
        }
    }
}
