import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.datagen.table.DataGenConnectorOptions;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.*;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.table.api.Expressions.$;

public class Demo08_sql {

    /**
     * Creates the paired stream execution environment and table environment.
     *
     * @return a tuple of (StreamExecutionEnvironment, StreamTableEnvironment)
     */
    public static Tuple2<StreamExecutionEnvironment, StreamTableEnvironment> getStreamTableEnvironment(){

        //1. Obtain Flink's streaming execution environment.
        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();

        //2. Build the environment settings. newInstance().build() defaults to streaming
        //   mode; EnvironmentSettings.inStreamingMode() is the fully equivalent shorthand.
        //   (The original code additionally called inStreamingMode() and discarded the
        //   result — a no-op statement, removed here.)
        EnvironmentSettings settings = EnvironmentSettings.newInstance().build();

        //3. Create the table environment on top of the stream environment. The settings
        //   argument may be omitted entirely; streaming mode is the default.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv,settings);

        return Tuple2.of(streamEnv,tableEnv);
    }

    /**
     * Reads a CSV source from the filesystem, registers it as a temporary table,
     * and prints a full scan of it.
     *
     * @param env tuple of (stream environment, table environment); only the table
     *            environment is used here
     */
    public static void source_file(Tuple2<StreamExecutionEnvironment, StreamTableEnvironment> env){

        StreamTableEnvironment tableEnv = env.f1;

        //Schema of the temporary view backed by the CSV file.
        final Schema schema = Schema.newBuilder()
                .column("sensorId", DataTypes.STRING())
                .column("timestamp", DataTypes.BIGINT())
                .column("temperature", DataTypes.DOUBLE())
                .build();

        //Register the file contents as a temporary table named "csvSource".
        tableEnv.createTemporaryTable(
                "csvSource",
                TableDescriptor
                        .forConnector("filesystem")
                        .schema(schema)
                        .option("path", "D:/data/data_flink/sensor")
                        .format(
                                FormatDescriptor
                                        .forFormat("csv")
                                        .option("field-delimiter", ",")
                                        .build()
                        )
                        .build()
        );

        //Run the query and print the result to stdout.
        TableResult result = tableEnv.executeSql("select * from csvSource");
        result.print();
    }

    /**
     * Reads CSV records from a Kafka topic, registers them as a temporary table,
     * and prints a continuous count per sensorId.
     *
     * @param env tuple of (stream environment, table environment); only the table
     *            environment is used here
     */
    public static void source_kafka(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){
        StreamTableEnvironment tableEnv = env.f1;

        //Schema of the temporary view backed by the Kafka topic.
        final Schema schema = Schema.newBuilder()
                .column("sensorId", DataTypes.STRING())
                .column("timestamp", DataTypes.BIGINT())
                .column("temperature", DataTypes.DOUBLE())
                .build();

        //Register the Kafka topic as a temporary table named "kafkaSource".
        //scan.startup.mode=latest-offset: only consume records arriving after start.
        tableEnv.createTemporaryTable(
                "kafkaSource",
                TableDescriptor.forConnector("kafka")
                        .schema(schema)
                        .option("topic", "supermarket")
                        .option("properties.group.id", "supermarket")
                        .option("scan.startup.mode","latest-offset")
                        .option("properties.bootstrap.servers", "node101:9092,node102:9092,node103:9092,node104:9092")
                        .format(
                                FormatDescriptor.forFormat("csv")
                                        .option("field-delimiter", ",")
                                        .build())
                        .build()
        );

        //Run a grouped count per sensorId and print the (continuously updating) result.
        tableEnv.executeSql("select sensorId,count(*) as c from kafkaSource group by sensorId").print();
    }

    /**
     * Generates random rows with the built-in "datagen" connector and prints them.
     *
     * @param env tuple of (stream environment, table environment); only the table
     *            environment is used here
     */
    public static void source_randData(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){
        StreamTableEnvironment tableEnv = env.f1;

        //Two generated columns: a random string and a random int.
        //3 rows per second, 30 rows total (the job finishes after 30 rows).
        tableEnv.createTemporaryTable(
                "SourceTable",
                TableDescriptor.forConnector("datagen")
                        .schema(
                                Schema.newBuilder()
                                        .column("string", DataTypes.STRING())
                                        .column("number", DataTypes.INT())
                                        .build())
                        .option(DataGenConnectorOptions.ROWS_PER_SECOND, 3L)
                        .option(DataGenConnectorOptions.NUMBER_OF_ROWS, 30L)
                        .build());

        //Run the scan and print all generated rows.
        tableEnv.executeSql("select * from SourceTable").print();
    }

    /**
     * Converts a socket-backed DataStream into a Table and processes it with the
     * Table API (filter + group-by + sum). A Table can only be manipulated with
     * the fluent API, not with SQL strings.
     *
     * @param env tuple of (stream environment, table environment)
     */
    public static void transform_dataStreamToTable(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){

        //1. Unpack the two environments.
        StreamExecutionEnvironment streamEnv = env.f0;
        StreamTableEnvironment tableEnv = env.f1;

        //2.1 Read raw lines from a socket as a DataStream.
        DataStreamSource<String> lineDataStream = streamEnv.socketTextStream("node101",8888);
        //2.2 Parse each CSV line into a SensorReading POJO for typed processing.
        DataStream<Demo02_sensorReading> sensorDataStream = lineDataStream.map(line->{
                    String[] fields = line.split(",");
                    return new Demo02_sensorReading(fields[0],Long.parseLong(fields[1]),Double.parseDouble(fields[2]));
                });

        //3. Convert the DataStream into a Table.
        Table sensorTable = tableEnv.fromDataStream(sensorDataStream);

        //4. Express the business logic with the Table API:
        //   drop sensor1, then sum temperatures per sensorId.
        Table resultTable = sensorTable
                .filter($("sensorId").isNotEqual("sensor1"))
                .groupBy($("sensorId"))
                .select($("sensorId"), $("temperature").sum().as("tempAll"));

        //5. Trigger execution of the pipeline.
        TableResult result = resultTable.execute();

        //6. Print the (continuously updating) result.
        result.print();
    }

    /**
     * Converts a socket-backed DataStream into a temporary View and processes it
     * with a SQL statement. Unlike a Table, a View can be queried with SQL.
     *
     * @param env tuple of (stream environment, table environment)
     */
    public static void transform_dataStreamToView(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){

        //1. Unpack the two environments.
        StreamExecutionEnvironment streamEnv = env.f0;
        StreamTableEnvironment tableEnv = env.f1;

        //2.1 Read raw lines from a socket as a DataStream.
        DataStreamSource<String> lineDataStream = streamEnv.socketTextStream("node101",8888);
        //2.2 Parse each CSV line into a SensorReading POJO for typed processing.
        DataStream<Demo02_sensorReading> sensorDataStream = lineDataStream.map(line->{
                    String[] fields = line.split(",");
                    return new Demo02_sensorReading(fields[0],Long.parseLong(fields[1]),Double.parseDouble(fields[2]));
                });

        //3. Register the DataStream as a temporary view named "sensor".
        tableEnv.createTemporaryView("sensor",sensorDataStream);

        //4. Use plain SQL against the view; executeSql returns the final result directly.
        TableResult result = tableEnv.executeSql(
            "select sensorId,sum(temperature) as tempAll " +
                    "from sensor " +
                    "where sensorId <> 'sensor1' " +
                    "group by sensorId"
        );

        //5. Print the result.
        result.print();
    }

    /**
     * Demonstrates converting back and forth between Table and View: a Table API
     * filter step, then a SQL aggregation via sqlQuery (which returns a Table),
     * then a final executeSql over a view of that Table.
     *
     * @param env tuple of (stream environment, table environment)
     * @return the final query result
     */
    public static TableResult transform_TableAndView(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){
        //1. Unpack the two environments.
        StreamExecutionEnvironment streamEnv = env.f0;
        StreamTableEnvironment tableEnv = env.f1;

        //2.1 Read raw lines from a socket as a DataStream.
        DataStreamSource<String> lineDataStream = streamEnv.socketTextStream("node101",8888);
        //2.2 Parse each CSV line into a SensorReading POJO for typed processing.
        DataStream<Demo02_sensorReading> sensorDataStream = lineDataStream.map(line->{
            String[] fields = line.split(",");
            return new Demo02_sensorReading(fields[0],Long.parseLong(fields[1]),Double.parseDouble(fields[2]));
        });

        //3.1 Convert the DataStream into a Table.
        Table sensorTable = tableEnv.fromDataStream(sensorDataStream);
        //3.1 Step one: filter out sensor1 using the Table API.
        Table filterSensorIdTable = sensorTable.filter($("sensorId").isNotEqual("sensor1"));
        //3.2 Expose the filtered Table as a view so SQL can reference it.
        tableEnv.createTemporaryView("filterSensorId",filterSensorIdTable);
        //3.2 Step two: group by sensorId and sum temperatures via SQL.
        //    sqlQuery returns a Table, so further processing is possible.
        Table tempAllTable = tableEnv.sqlQuery(
                "select sensorId,sum(temperature) as tempAll " +
                        "from filterSensorId " +
                        "group by sensorId"
        );
        tableEnv.createTemporaryView("res",tempAllTable);

        //3.3 Step three: executeSql returns a TableResult — the final result.
        TableResult result = tableEnv.executeSql(
                "select * from res"
        );

        return result;
    }

    /**
     * Writes filtered sensor readings to a CSV sink on the local filesystem.
     *
     * @param env tuple of (stream environment, table environment)
     */
    public static void sink_file(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){
        //1. Unpack the two environments.
        StreamExecutionEnvironment streamEnv = env.f0;
        StreamTableEnvironment tableEnv = env.f1;

        //2.1 Read raw lines from a socket as a DataStream.
        DataStreamSource<String> lineDataStream = streamEnv.socketTextStream("node101",8888);
        //2.2 Parse each CSV line into a SensorReading POJO for typed processing.
        DataStream<Demo02_sensorReading> sensorDataStream = lineDataStream.map(line->{
            String[] fields = line.split(",");
            return new Demo02_sensorReading(fields[0],Long.parseLong(fields[1]),Double.parseDouble(fields[2]));
        });

        //3. Convert the DataStream into a Table.
        Table sensorTable = tableEnv.fromDataStream(sensorDataStream);

        //4. Filter out sensor1 and project the two columns to be written.
        Table resultTable = sensorTable
                .filter($("sensorId").isNotEqual("sensor1"))
                .select($("sensorId"), $("temperature"));


        //5.1 Schema of the output (sink) table.
        final Schema schema = Schema.newBuilder()
                .column("sensorId", DataTypes.STRING())
                .column("temperature", DataTypes.DOUBLE())
                .build();

        //5.2 Register the sink table.
        tableEnv.createTemporaryTable(
                "csvSink",
                TableDescriptor
                        .forConnector("filesystem")
                        .schema(schema)
                        .option("path", "E://sensor")
                        .format(
                                FormatDescriptor
                                        .forFormat("csv")
                                        .option("field-delimiter", ",")
                                        .build()
                        )
                        .build()
        );

        //6. Insert the result rows into the sink table.
        resultTable.executeInsert("csvSink");
    }

    /**
     * Writes filtered sensor readings to a Kafka topic as CSV.
     *
     * @param env tuple of (stream environment, table environment)
     */
    public static void sink_kafka(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){
        //1. Unpack the two environments.
        StreamExecutionEnvironment streamEnv = env.f0;
        StreamTableEnvironment tableEnv = env.f1;

        //2.1 Read raw lines from a socket as a DataStream.
        DataStreamSource<String> lineDataStream = streamEnv.socketTextStream("node101",8888);
        //2.2 Parse each CSV line into a SensorReading POJO for typed processing.
        DataStream<Demo02_sensorReading> sensorDataStream = lineDataStream.map(line->{
            String[] fields = line.split(",");
            return new Demo02_sensorReading(fields[0],Long.parseLong(fields[1]),Double.parseDouble(fields[2]));
        });

        //3. Convert the DataStream into a Table.
        Table sensorTable = tableEnv.fromDataStream(sensorDataStream);

        //4. Filter out sensor1 and project the two columns to be written.
        Table resultTable = sensorTable
                .filter($("sensorId").isNotEqual("sensor1"))
                .select($("sensorId"), $("temperature"));


        //5.1 Schema of the output (sink) table.
        //    NOTE(review): the second sink column is named "temperatureAll" while the
        //    selected column is "temperature"; executeInsert maps columns by position,
        //    so this works, but the names should probably be aligned.
        final Schema schema = Schema.newBuilder()
                .column("sensorId", DataTypes.STRING())
                .column("temperatureAll", DataTypes.DOUBLE())
                .build();

        //5.2 Register the Kafka sink table.
        tableEnv.createTemporaryTable(
                "kafkaSink",
                TableDescriptor.forConnector("kafka")
                        .schema(schema)
                        .option("topic", "supermarket")
                        .option("properties.group.id", "supermarket")
                        .option("properties.bootstrap.servers", "node101:9092,node102:9092,node103:9092,node104:9092")
                        .format(
                                FormatDescriptor.forFormat("csv")
                                        .option("field-delimiter", ",")
                                        .build())
                        .build()
        );

        //6. Insert the result rows into the sink table.
        resultTable.executeInsert("kafkaSink");
    }

    /**
     * Registers a user-defined function and applies it to each row via SQL.
     *
     * @param env tuple of (stream environment, table environment)
     */
    public static void function_self(Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env){
        //1. Unpack the two environments.
        StreamExecutionEnvironment streamEnv = env.f0;
        StreamTableEnvironment tableEnv = env.f1;

        //2.1 Read raw lines from a socket as a DataStream.
        DataStreamSource<String> lineDataStream = streamEnv.socketTextStream("node101",8888);
        //2.2 Parse each CSV line into a SensorReading POJO for typed processing.
        DataStream<Demo02_sensorReading> sensorDataStream = lineDataStream.map(line->{
            String[] fields = line.split(",");
            return new Demo02_sensorReading(fields[0],Long.parseLong(fields[1]),Double.parseDouble(fields[2]));
        });

        //3. Register the DataStream as a temporary view named "sensor".
        tableEnv.createTemporaryView("sensor",sensorDataStream);
        //4. Register the custom class as a temporary function callable from SQL.
        tableEnv.createTemporaryFunction("tempPlus",new Demo08_function());

        //5. Query the data through the user-defined function.
        TableResult result = tableEnv.executeSql(
                "select sensorId,tempPlus(temperature) as tempPlus from sensor"
        );

        //6. Print the result.
        result.print();
    }

    /**
     * Entry point. Uncomment exactly one demo call at a time; each one submits
     * (and in most cases blocks on) its own Flink job.
     */
    public static void main(String[] args) {

        Tuple2<StreamExecutionEnvironment,StreamTableEnvironment> env = getStreamTableEnvironment();

//        source_file(env);
//        source_kafka(env);
//        source_randData(env);

//        transform_dataStreamToTable(env);
//        transform_dataStreamToView(env);
//        TableResult result = transform_TableAndView(env);

//        sink_file(env);
//        sink_kafka(env);

        function_self(env);

    }


}
