package com.atguigu.chapter11;

import com.atguigu.chapter05.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Elasticsearch;
import org.apache.flink.table.descriptors.Json;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.time.Duration;

import static org.apache.flink.table.api.Expressions.$;

/**
 * TODO
 *
 * @author cjp
 * @version 1.0
 * @date 2021/3/12 9:30
 */
/**
 * Demonstrates the legacy {@code connect(...)} descriptor API: counts water-sensor
 * readings per (id, vc) pair and upserts the result into an Elasticsearch 6 index.
 *
 * <p>Pipeline: CSV file -> WaterSensor POJO stream (with bounded-out-of-orderness
 * watermarks) -> Table API group-by aggregation -> ES upsert sink.
 */
public class Flink07_TableAPI_Connector_ESSink {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Source: read "id,ts,vc" CSV lines and parse each into a WaterSensor POJO.
        SingleOutputStreamOperator<WaterSensor> sensorDS = env
//                .socketTextStream("localhost", 9999)   // alternative interactive source
                .readTextFile("input/sensor.csv")
                .map(new MapFunction<String, WaterSensor>() {
                    @Override
                    public WaterSensor map(String value) throws Exception {
                        // Split the CSV line into id, timestamp (s), water level.
                        String[] line = value.split(",");
                        return new WaterSensor(line[0], Long.parseLong(line[1]), Integer.parseInt(line[2]));
                    }
                })
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy
                                .<WaterSensor>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                                // ts is in seconds; Flink event time is milliseconds.
                                .withTimestampAssigner((value, ts) -> value.getTs() * 1000L)
                );

        // Table environment on the old planner (required by the connect() descriptor API used below).
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .useOldPlanner()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        Table inputTable = tableEnv.fromDataStream(sensorDS);

        // Count readings per (id, vc). A grouped aggregation emits updates, not appends.
        Table resultTable = inputTable
                .groupBy($("id"), $("vc"))
                .select($("id"), $("vc"), $("id").count().as("cnt"));

        // Register the Elasticsearch sink as a temporary table via a connector descriptor.
        tableEnv
                .connect(new Elasticsearch()
                        .host("hadoop102", 9200, "http")
                        .version("6")
                        .bulkFlushMaxActions(1)     // flush after every action — fine for a demo, too chatty for production
                        .index("flinksql0923")
                        .documentType("_doc")
                        .keyDelimiter("|")          // upsert mode: multi-field group keys are joined into the ES doc id with this delimiter
                )
                .withFormat(new Json())
                .withSchema(new Schema()
                        .field("id", DataTypes.STRING())
                        .field("vc", DataTypes.INT())
                        .field("cnt", DataTypes.BIGINT())
                )
//                .inAppendMode()   // ES requires an explicit insert mode; append would reject the updating aggregate
                .inUpsertMode()     // updating result -> upsert mode is required
                .createTemporaryTable("esTable");

        // executeInsert() submits and runs the job itself. Do NOT also call env.execute():
        // the remaining DataStream topology has no sink, so it would fail with
        // "No operators defined in streaming topology".
        resultTable.executeInsert("esTable");
    }
}

/**
 * Abstracting an external system as a dynamic table via a connector:
 * 1. Call tableEnv.connect(descriptor) with the external system's descriptor and its parameters.
 * 2. Specify the data format (withFormat).
 * 3. Specify the table schema (withSchema).
 * 4. Register it under a temporary table name (createTemporaryTable).
 */
