package com.myflink.day10;

import com.myflink.bean.WaterSensor;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.FileSystem;
import org.apache.flink.table.descriptors.OldCsv;
import org.apache.flink.table.descriptors.Schema;
import org.apache.flink.types.Row;

/**
 * @author Shelly An
 * @create 2020/9/27 9:34
 */
/**
 * Demo of the legacy (pre-Blink) Flink Table API on a stream:
 * DataStream -> Table, filtering/projection, grouped aggregation via a
 * retract stream, and writing to a filesystem sink registered through
 * the connector descriptor API.
 */
public class SQL_TableAPI {
    public static void main(String[] args) throws Exception {
        // Streaming environment: single parallelism, event-time semantics.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Read the sensor file and parse each CSV line into a WaterSensor bean.
        DataStreamSource<String> lineDS = env.readTextFile("input/sensor-data.log");
        SingleOutputStreamOperator<WaterSensor> sensorDS = lineDS
                .map(new MapFunction<String, WaterSensor>() {
                    @Override
                    public WaterSensor map(String line) throws Exception {
                        String[] fields = line.split(",");
                        return new WaterSensor(fields[0], Long.valueOf(fields[1]), Integer.valueOf(fields[2]));
                    }
                })
                // Event-time timestamps, tolerating up to 3 seconds of out-of-order data.
                .assignTimestampsAndWatermarks(
                        new BoundedOutOfOrdernessTimestampExtractor<WaterSensor>(Time.seconds(3)) {
                            @Override
                            public long extractTimestamp(WaterSensor sensor) {
                                // ts is multiplied by 1000, i.e. converted to the
                                // millisecond timestamps Flink expects.
                                return sensor.getTs() * 1000L;
                            }
                        }
                );

        /*---------------------------------------------------------------------------------------*/
        // 1. Create the table execution environment.
        //    The single-argument overload would also work:
//        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useOldPlanner()        // legacy planner
                //.useBlinkPlanner()    // alternative: Blink planner
                .inStreamingMode()      // streaming mode (the default; batch is also available)
                .build();

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // 2. Turn the DataStream into a Table. Fields are matched to the POJO by
        //    attribute name (not by position); "ts" is exposed as "timestamp".
        Table sensorTable = tableEnv.fromDataStream(sensorDS, "id,ts as timestamp,vc");

        // 3. Query with the Table API: keep only sensor_1 rows, project two columns.
        Table result1Table = sensorTable
                .filter("id=='sensor_1'")
                //.where() // where() delegates to filter() internally
                .select("id,timestamp");


        // 4a. Convert back to a DataStream with an explicit target type
        //     (use this when the automatically derived type is not the one you want).
        DataStream<Tuple2<String, Long>> result1DS = tableEnv.toAppendStream(
                result1Table,
                TypeInformation.of(new TypeHint<Tuple2<String, Long>>() {
                }));

        result1DS.print();


        // 4b. Convert to a DataStream of Row — field types are derived automatically.
        DataStream<Row> resultDS = tableEnv.toAppendStream(result1Table, Row.class);

        resultDS.print();

        /*------------------------------ Table API: aggregation ------------------------------------*/


        // Grouped aggregation on an unbounded stream.
        // groupBy() must precede select(): the calls follow the logical execution order.
        Table resultTable = sensorTable
                .groupBy("id")
//                .aggregate("count(id) as cnt")
                .select("id,count(id)");

        // An aggregation updates previously emitted results, so a retract stream is
        // required. An update is modeled as delete-old + insert-new; the Boolean
        // flag marks the kind: false = delete, true = insert.
        DataStream<Tuple2<Boolean, Row>> result2DS = tableEnv.toRetractStream(resultTable, Row.class);

        result2DS.print();
        /*------------------------------ Table API: sink ------------------------------------*/
        // Write directly to the local filesystem without converting back to a stream.

        // The external system is abstracted as a table: we declare its connector,
        // storage format, schema (field names/types), and a table name.
        // 1. connect()              - connector descriptor (filesystem here; Kafka/ES also exist)
        // 2. withFormat()           - storage format of the external data
        // 3. withSchema()           - field names and types of the abstracted table
        // 4. createTemporaryTable() - register the abstracted table under a name
        tableEnv
                // filesystem connector; Kafka/Elasticsearch descriptors also exist
                .connect(new FileSystem().path("output/flink.txt"))
                // storage format; the newer Csv format needs an extra dependency
                .withFormat(new OldCsv().fieldDelimiter("|"))
                // schema: column names and types
                .withSchema(new Schema()
                        .field("id", DataTypes.STRING())
                        .field("hahaha", DataTypes.INT()))
                .createTemporaryTable("fsTable");


        Table result2Table = sensorTable
                .select("id,vc");

        // insertInto() appends one table's rows into another — here the
        // registered table that fronts the external filesystem sink.
        result2Table.insertInto("fsTable");

        env.execute();
    }
}
