package com.bw.yk03;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

// Input : Kafka topic "ods_db_data" — JSON records shaped like {database, data, operateType, table}
// Output: Kafka topic "dwd_db_orders" — joined order wide-table records
public class FlinkDWD {

    /**
     * Shared formatter for the "create_time" field. DateTimeFormatter is immutable and
     * thread-safe, so it can be a static constant — unlike SimpleDateFormat, which the
     * original code had to allocate on every event.
     */
    private static final DateTimeFormatter CREATE_TIME_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    /**
     * Parses a "yyyy-MM-dd HH:mm:ss" create_time string to epoch milliseconds.
     * NOTE(review): uses the system default zone — assumes create_time is in the
     * server's local time; confirm against the upstream producer.
     *
     * @param createTime event-time string from the source record
     * @return epoch milliseconds for Flink event-time assignment
     */
    private static long toEpochMillis(String createTime) {
        return LocalDateTime.parse(createTime, CREATE_TIME_FORMAT)
                .atZone(ZoneId.systemDefault())
                .toInstant()
                .toEpochMilli();
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "hadoop-single:9092");
        properties.setProperty("group.id", "group1");
        DataStream<String> stream = env.addSource(
                new FlinkKafkaConsumer<>("ods_db_data", new SimpleStringSchema(), properties));

        // Split the ODS stream into order_detail / order_info side outputs.
        OutputTag<String> odTag = new OutputTag<String>("od") {};
        // NOTE(review): tag id "id" looks like a typo for "oi"; harmless here because the
        // tag object itself (not the id string) is what getSideOutput matches on.
        OutputTag<String> oiTag = new OutputTag<String>("id") {};
        SingleOutputStreamOperator<String> processDS = stream.process(new ProcessFunction<String, String>() {
            @Override
            public void processElement(String s, ProcessFunction<String, String>.Context context, Collector<String> collector) throws Exception {
                JSONObject jo = JSON.parseObject(s);
                JSONObject data = jo.getJSONObject("data");
                // Drop records with a missing or empty "data" payload
                // (original code NPE'd when "data" was absent).
                if (data == null || data.isEmpty()) {
                    return;
                }
                String table = jo.getString("table");
                // Constant-first equals avoids an NPE when "table" is absent.
                if ("order_detail".equals(table)) {
                    context.output(odTag, data.toString());
                } else if ("order_info".equals(table)) {
                    context.output(oiTag, data.toString());
                } else {
                    collector.collect(data.toString());
                }
            }
        });

        // Order stream: event time from create_time, zero out-of-orderness tolerance.
        SingleOutputStreamOperator<OrderInfo> oiDS = processDS.getSideOutput(oiTag).map(new MapFunction<String, OrderInfo>() {
            @Override
            public OrderInfo map(String s) throws Exception {
                return JSON.parseObject(s, OrderInfo.class);
            }
        }).assignTimestampsAndWatermarks(WatermarkStrategy
                .<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(0))
                .withTimestampAssigner((event, timestamp) -> toEpochMillis(event.getCreate_time())));

        // Order-detail stream: same watermarking scheme as the order stream.
        SingleOutputStreamOperator<OrderDetail> odDS = processDS.getSideOutput(odTag).map(new MapFunction<String, OrderDetail>() {
            @Override
            public OrderDetail map(String s) throws Exception {
                return JSON.parseObject(s, OrderDetail.class);
            }
        }).assignTimestampsAndWatermarks(WatermarkStrategy
                .<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(0))
                .withTimestampAssigner((event, timestamp) -> toEpochMillis(event.getCreate_time())));

        // Interval-join orders with their details (±2 minutes in event time)
        // to build the order wide-table stream.
        SingleOutputStreamOperator<OrderWide> orderWideDS = oiDS.keyBy(OrderInfo::getId)
                .intervalJoin(odDS.keyBy(OrderDetail::getOrder_id))
                .between(Time.minutes(-2), Time.minutes(2))
                .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                    @Override
                    public void processElement(OrderInfo orderInfo, OrderDetail orderDetail, ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>.Context context, Collector<OrderWide> collector) throws Exception {
                        // Dimension fields (sku/user/province) are filled by the async stage below.
                        collector.collect(new OrderWide(orderInfo, orderDetail, null, null, null));
                    }
                });

        // Enrich with HBase dimension data (sku_id, user_id, province_id) via async I/O;
        // 1s timeout, at most 100 in-flight requests.
        SingleOutputStreamOperator<OrderWide> enrichedDS =
                AsyncDataStream.unorderedWait(orderWideDS, new FlinkDWDAsyncIO(), 1000, TimeUnit.MILLISECONDS, 100);
        enrichedDS.print();

        // Sink the wide-table records to Kafka as JSON.
        FlinkKafkaProducer<String> myProducer = new FlinkKafkaProducer<>(
                "dwd_db_orders",             // target topic
                new SimpleStringSchema(),    // serialization schema
                properties);
        enrichedDS.map(x -> JSON.toJSONString(x)).addSink(myProducer);
        env.execute();
    }
}
