package net.bw.realtime.jtp.dwd.log.job;

import com.alibaba.fastjson.JSON;
import net.bw.realtime.jtp.common.utils.KafkaUtil;
import net.bw.realtime.jtp.dwd.log.function.AdjustIsNewProcessFunction;
import net.bw.realtime.jtp.dwd.log.function.LogSplitProcessFunction;
import net.bw.realtime.jtp.dwd.log.function.JtpLogClickHouseSinkFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

/**
 * DWD-layer job: performs ETL cleansing and stream splitting on app traffic log data.
 *
 * @author liuyawei
 * @date 2025/5/18
 */
public class JtpLogEtlJob {

    public static void main(String[] args) throws Exception {

        // 1. 创建执行环境
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 设置并行度
        env.setParallelism(1);

        // 2. 读取数据
        DataStream<String> logDataStream = KafkaUtil.consumerKafka(env, "topic-log");
        //logDataStream.print();

        // 3. 转换数据 4. 输出数据
        process(logDataStream);

        // 5. 启动程序
        env.execute("JtpLogEtlJob");

    }

    /*
     * 1.数据清洗
     * 2.新老访客状态标记修复
     * 3.数据分流
     * 4.数据存储
     * @param logDataStream
     */
    private static void process(DataStream<String> logDataStream) {
        // 1. 对数据进行清洗，过滤脏数据
        DataStream<String> logCleaned = logCleaned(logDataStream);

        // 2. 对数据进行新老访客状态标记修复
        DataStream<String> processedIsNew = processIsNew(logCleaned);

        // 3. 数据分流
        SingleOutputStreamOperator<String> pageStream = splitStream(processedIsNew);

        // 4. 数据存储
        KafkaUtil.producerKafka(pageStream, "dwd-traffic-page-log");

        // 5. 数据存储clickhouse
        sinkToClickhouse(pageStream);

    }

    // 4. 数据存储clickhouse
    private static void sinkToClickhouse(SingleOutputStreamOperator<String> pageStream) {
        JtpLogClickHouseSinkFunction.savePageLog(pageStream);
        JtpLogClickHouseSinkFunction.saveStartLog(pageStream.getSideOutput(new OutputTag<String>("start-log"){}));
        JtpLogClickHouseSinkFunction.saveDisplayLog(pageStream.getSideOutput(new OutputTag<String>("display-log"){}));
        JtpLogClickHouseSinkFunction.saveActionLog(pageStream.getSideOutput(new OutputTag<String>("action-log"){}));
        JtpLogClickHouseSinkFunction.saveErrorLog(pageStream.getSideOutput(new OutputTag<String>("error-log"){}));
    }

    /*
     * 3. 数据分流
     */
    private static SingleOutputStreamOperator<String> splitStream(DataStream<String> processedIsNew) {

        //  1. 创建对应日志的侧输出流
        OutputTag<String> errorTag = new OutputTag<String>("error-log"){};
        OutputTag<String> startTag = new OutputTag<String>("start-log"){};
        OutputTag<String> displayTag = new OutputTag<String>("display-log"){};
        OutputTag<String> actionTag = new OutputTag<String>("action-log"){};

        // 2. 对数据进行分流
        // 调用  LogSplitProcessFunction 对日志数据进行分流
        SingleOutputStreamOperator<String> pageTag = processedIsNew.process(
                new LogSplitProcessFunction(errorTag, startTag, displayTag, actionTag)
        );

        // 3. 输出数据,并存储kafka
        // 错误日志
        DataStream<String> errorStream = pageTag.getSideOutput(errorTag);
        KafkaUtil.producerKafka(errorStream, "dwd-traffic-error-log");

        // 启动日志
        DataStream<String> startStream = pageTag.getSideOutput(startTag);
        KafkaUtil.producerKafka(startStream, "dwd-traffic-start-log");

        // 曝光日志
        DataStream<String> displayStream = pageTag.getSideOutput(displayTag);
        KafkaUtil.producerKafka(displayStream, "dwd-traffic-display-log");

        // 动作日志
        DataStream<String> actionStream = pageTag.getSideOutput(actionTag);
        KafkaUtil.producerKafka(actionStream, "dwd-traffic-action-log");

        //  4. 返回数据
        return pageTag;
    }

    /*
     *  2.新老访客状态标记修复
     */
    private static DataStream<String> processIsNew(DataStream<String> logCleaned) {

        // 根据设备ID进行分组
        KeyedStream<String, String> midKeyedStream = logCleaned.keyBy(
                new KeySelector<String, String>() {
                    @Override
                    public String getKey(String value) throws Exception {
                        return JSON.parseObject(value).getJSONObject("common").getString("mid");
                    }
                }
        );

        // 调用 AdjustIsNewProcessFunction 分析新老访客状态
        SingleOutputStreamOperator<String> isNewStream = midKeyedStream.process(new AdjustIsNewProcessFunction());

        return isNewStream;
    }

    /*
     * 1.数据清洗
     * 对APP流量日志数据进行清洗，过滤脏数据
     */
    private static DataStream<String> logCleaned(DataStream<String> logDataStream) {
        // 1. 定义存储脏数据的测流
        OutputTag<String> dirtyTag = new OutputTag<String>("dirty-log"){};

        // 2. 对数据进行清洗，过滤脏数据
        SingleOutputStreamOperator<String> cleanedStream = logDataStream.process(new ProcessFunction<String, String>() {
            @Override
            public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
                try {
                    // 解析数据
                    JSON.parseObject(value);
                    // 输出数据
                    out.collect(value);
                } catch (Exception e) {
                    // 输出脏数据
                    ctx.output(dirtyTag, value);
                }
            }
        });

        // 3.输出测流，存储kafka
        //如果没有脏数据 可以手动发送测试一下
        // kafka-console-producer.sh --broker-list node101:9092,node102:9092,node103:9092 --topic topic-log
        DataStream<String> dirtyStream = cleanedStream.getSideOutput(dirtyTag);
        KafkaUtil.producerKafka(dirtyStream, "dwd-traffic-dirty-log");

        //4.返回处理后的数据
        return cleanedStream;
    }


}
