package server_timu;

import com.google.gson.Gson;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SideOutputDataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.windowing.AllWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public class T1_ObjectMapper {
    public static void main(String[] args) throws Exception {
        //  todo 创建流式环境
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //  todo 创建Gson对象，实例化   处理json数据
        Gson gson = new Gson();
        // todo 创建flink自带的解析json数据的实例化
        ObjectMapper objectMapper = new ObjectMapper();

        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");


        //  todo 创建测输出流存储order_detail的数据  这里的写法避免了泛型擦除
        OutputTag<String> detail_out = new OutputTag<String>("detail_out"){};



        //  todo 拿到topic_db主题的数据

        //  创建kafkasource
        //  todo setStartingOffsets:用于指定 KafkaSource 启动时从哪个位置开始读取数据。
        //  todo OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST):
        //  todo 表示从 Kafka 中该消费者组（groupId）已提交的偏移量（committed offsets）开始消费。
        KafkaSource<String> kafka_source = KafkaSource.<String>builder()
                .setBootstrapServers("192.168.40.110:9092")
                .setGroupId("ods_mall_data_group")
                .setValueOnlyDeserializer(new SimpleStringSchema())                 //  只对值进行反序列化
                .setTopics("topic_db")
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
                .build();

        //  得到数据
        DataStreamSource<String> data = env.fromSource(
                kafka_source,
                WatermarkStrategy.noWatermarks(),
                "kafka_source"
        );


        data.print("查看源数据格式");

        //  todo 这里使用gson的话返回的数据是JsonElement类型的数据
        SingleOutputStreamOperator<JsonNode> info_data = data.process(new ProcessFunction<String, JsonNode>() {
            @Override
            public void processElement(String s, ProcessFunction<String, JsonNode>.Context context, Collector<JsonNode> collector) throws Exception {
                JsonNode jsonNode = objectMapper.readTree(s);
                String table = jsonNode.get("table").asText();
                JsonNode data = jsonNode.get("data");
                if (table.contains("order_master")) {
                    collector.collect(data);
                } else if (table.contains("order_detail")) {
                    context.output(detail_out, data.toString());
                }
            }
        });


        //  todo 对主流数据进行排序(上面已经将数据流分开)
        //  todo WindowAll:全局窗口，将所有数据放到一个窗口，即不分区，分组，并行度为1
        SingleOutputStreamOperator<String> master_data = info_data.windowAll(TumblingProcessingTimeWindows.of(Time.seconds(20)))
                .allowedLateness(Time.minutes(2))
                .apply(new AllWindowFunction<JsonNode, String, TimeWindow>() {
                    @Override
                    public void apply(TimeWindow timeWindow, Iterable<JsonNode> iterable, Collector<String> collector) throws Exception {
                        Stream<JsonNode> stream = StreamSupport.stream(iterable.spliterator(), false);
                        List<JsonNode> sordList = stream.sorted(Comparator.comparing(strings -> {
                            try {
                                return format.parse(strings.get("modified_time").asText()).getTime();
                            } catch (ParseException e) {
                                throw new RuntimeException(e);
                            }
                        })).collect(Collectors.toList());
                        for (JsonNode node : sordList) {
                            collector.collect(node.toString());
                        }
                    }
                });

        //  todo 拿到测输出流的数据
        SideOutputDataStream<String> detail_data = info_data.getSideOutput(detail_out);

        //  todo 根据题目创建不同的kafka_sink
        KafkaSink<String> kafkaSink_master = KafkaSink.<String>builder()
                .setBootstrapServers("192.168.40.110:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.<String>builder()
                                .setTopic("fact_order_master")
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build()
                )
                .build();

        KafkaSink<String> kafkaSink_detail = KafkaSink.<String>builder()
                .setBootstrapServers("192.168.40.110:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.<String>builder()
                                .setTopic("fact_order_detail")
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build()

                )
                .build();


        //  todo 数据写入对应kafka主题
        master_data.sinkTo(kafkaSink_master);
        detail_data.sinkTo(kafkaSink_detail);

        //  todo 将主流数据输出到error错误流这样是为了区分两条数据流
        master_data.printToErr();
        detail_data.print();


        env.execute();



    }
}
