package com.hkbigdata.sink;

import com.alibaba.fastjson.JSON;
import com.hkbigdata.bean.WaterSensor;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;

import java.util.ArrayList;
import java.util.List;

/**
 * @author liuanbo
 * Created: 2024-04-12 17:28
 * Contact: 2194550857@qq.com
 */
public class Flink01_Sink_Kafka_Anonymous {

    /**
     * Builds a small in-memory collection of {@code WaterSensor} readings,
     * serializes each reading to a JSON string, and writes the strings to the
     * Kafka topic {@code "sensor"} on broker {@code hadoop102:9092}.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Single parallel task keeps this demo's record order deterministic.
        env.setParallelism(1);

        // Program to the List interface rather than the concrete ArrayList.
        List<WaterSensor> waterSensors = new ArrayList<>();
        waterSensors.add(new WaterSensor("sensor_1", 1607527992000L, 20.0));
        waterSensors.add(new WaterSensor("sensor_1", 1607527994000L, 50.0));
        waterSensors.add(new WaterSensor("sensor_1", 1607527996000L, 50.0));
        waterSensors.add(new WaterSensor("sensor_2", 1607527993000L, 10.2));
        waterSensors.add(new WaterSensor("sensor_2", 1607527995000L, 30.2));

        env.fromCollection(waterSensors)
                // Serialize each POJO to its JSON representation.
                .map(JSON::toJSONString)
                // NOTE(review): this (brokerList, topic, schema) constructor is the
                // deprecated convenience overload; when upgrading Flink, prefer the
                // Properties-based FlinkKafkaProducer constructor or, on Flink 1.14+,
                // the KafkaSink builder API.
                .addSink(new FlinkKafkaProducer<String>("hadoop102:9092", "sensor", new SimpleStringSchema()));

        /*
         * Study notes (translated from the original Chinese):
         * 1. Offline data warehouse: Hive on Spark
         * 2. Real-time data warehouse: Flink
         * 3. Spark Streaming real-time warehouse or Spark SQL offline warehouse
         * 4. Hudi data lake: lakehouse architecture
         * 5. Data governance
         * 6. Data middle platform
         * 7. Data mining
         * 8. User profiling
         */

        env.execute();
    }
}
