package com.flink.examples;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import org.model.BurnLogModel;
import org.model.OriBurnLogModel;
import org.schema.OriBurnLogSchema;
import org.util.CkSinkUtil;

/**
 * Streaming pipeline for burn (disc-writing) log records.
 * 1. Persists raw burn logs into ClickHouse.
 * 2. TODO: combine with alerting rules to raise alerts on notable entries,
 *    e.g. critical_level &gt;= 7.
 *
 * Open questions (carried over from the original author):
 * - Duplicate storage of logs? Checkpointing still has to be added; for now the
 *   job replays from the earliest offset, so writes must be idempotent — review
 *   ClickHouse's mutation/update performance for that.
 */

public class BurnLogStream {

    /**
     * JSON keys contributed by each element of {@code burn_file_list}.
     * They are stripped from the working object after each file is emitted so
     * one file's fields never bleed into the next file's flattened record.
     */
    private static final String[] FILE_FIELDS = {
            "file_hash", "file_name", "file_path",
            "file_sec_level", "file_size", "file_type"
    };

    /**
     * Sinks the raw burn-log stream into ClickHouse table ZJG_LOG_ORI.ORI_BURN_LOG.
     *
     * @param dataStream raw burn-log records consumed from Kafka
     */
    private static void loadSourceData2Ck(DataStream<OriBurnLogModel> dataStream) {
        String sql = CkSinkUtil.generateInsertSql(OriBurnLogModel.class, "ZJG_LOG_ORI.ORI_BURN_LOG");
        dataStream.addSink(
                JdbcSink.sink(
                        sql,
                        CkSinkUtil.getJdbcStatementBuilder(OriBurnLogModel.class),
                        CkSinkUtil.getJdbcExecutionOptions(),
                        CkSinkUtil.getJdbcConnectionOptions()
                )
        ).name("toOriBurnLog");
    }

    /**
     * Sinks the transformed (flattened) stream into ClickHouse table ZJG_LOG_DWD.BURN_LOG.
     *
     * @param burnLogSource one record per (log entry, burned file) pair
     */
    private static void loadBurnLog2Ck(DataStream<BurnLogModel> burnLogSource) {
        String sql = CkSinkUtil.generateInsertSql(BurnLogModel.class, "ZJG_LOG_DWD.BURN_LOG");
        burnLogSource.addSink(
                JdbcSink.sink(
                        sql,
                        CkSinkUtil.getJdbcStatementBuilder(BurnLogModel.class),
                        CkSinkUtil.getJdbcExecutionOptions(),
                        CkSinkUtil.getJdbcConnectionOptions()
                )
        ).name("toBurnLog");
    }

    /**
     * Flattens the raw burn-log stream: crosses each record with its
     * {@code burn_file_list} entries, emitting one wide {@link BurnLogModel}
     * per burned file.
     *
     * @param oriSource raw burn-log stream
     * @return flattened stream, one element per (record, file) pair
     */
    private static DataStream<BurnLogModel> transOriBurnLogStream(DataStream<OriBurnLogModel> oriSource) {
        return oriSource.flatMap(new FlatMapFunction<OriBurnLogModel, BurnLogModel>() {
            @Override
            public void flatMap(OriBurnLogModel oriBurnLogModel, Collector<BurnLogModel> collector) throws Exception {
                // Round-trip through JSON so per-file fields can be merged into a copy
                // of the record without touching the model classes.
                JSONObject base = JSON.parseObject(JSON.toJSONString(oriBurnLogModel));
                JSONArray files = base.getJSONArray("burn_file_list");
                base.remove("burn_file_list");
                // Guard: a record without a file list used to NPE here and fail the
                // whole job; such records simply produce no flattened rows.
                if (files == null) {
                    return;
                }
                for (int i = 0; i < files.size(); i++) {
                    base.putAll(files.getJSONObject(i));
                    collector.collect(JSON.parseObject(base.toJSONString(), BurnLogModel.class));
                    // Strip this file's fields before merging the next file, so a key
                    // missing from a later file cannot inherit an earlier file's value.
                    for (String field : FILE_FIELDS) {
                        base.remove(field);
                    }
                }
            }
        });
    }

    public static void main(String[] args) throws Exception {
        // Create the Flink execution environment; expose the Web UI on port 8081.
        Configuration configuration = new Configuration();
        configuration.setString("rest.port","8081");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);

        // Consume burn-log records from Kafka. Starts from the earliest offset —
        // no checkpointing yet, see the idempotency note on the class Javadoc.
        KafkaSource<OriBurnLogModel> kafkaSource = KafkaSource.<OriBurnLogModel>builder()
                .setBootstrapServers("192.168.19.121:9092")
                .setTopics("burn-logs")
                .setGroupId("zjg-stream")
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setValueOnlyDeserializer(new OriBurnLogSchema())
                .build();
        SingleOutputStreamOperator<OriBurnLogModel> oriSource =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "burn-log-topic")
                        .setParallelism(3);

        // Persist the raw records to ClickHouse.
        loadSourceData2Ck(oriSource);

        // Flatten: cross each record with its burn_file_list and store in the wide table.
        DataStream<BurnLogModel> burnLogSource = transOriBurnLogStream(oriSource);
        loadBurnLog2Ck(burnLogSource);

        // Placeholder for streaming alert rules (not implemented yet).

        env.execute("burn-logs-stream");
    }
}
