package com.hhf.rrd.readtime_report;

import com.hhf.rrd.readtime_report.model.Transaction;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.Tumble;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.FileSystem;
import org.apache.flink.table.descriptors.Json;
import org.apache.flink.table.descriptors.Schema;
import org.apache.flink.types.Row;
import org.apache.flink.util.Collector;

import java.sql.Timestamp;

import static org.apache.flink.table.api.Expressions.*;

/**
 * Real-time report job.
 *      Open the socket port:  nc -l 9999
 *      Check port usage:      lsof -i :9999
 *
 * @author huanghaifeng15
 * @date 2022/2/12 07:49
 **/
public class ReportJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000);

        // Socket source: each line is expected to be "accountId,amount,transactionTimeMillis".
        DataStreamSource<String> dataStreamSource = env.socketTextStream("localhost", 9999);

        // Parse raw socket lines into Transaction records. Blank, short, or
        // numerically malformed lines are skipped instead of throwing — a single
        // bad line must not fail the whole streaming job.
        KeyedStream<Transaction, Long> keyedStream = dataStreamSource.flatMap(new RichFlatMapFunction<String, Transaction>() {
            @Override
            public void flatMap(String value, Collector<Transaction> out) throws Exception {
                if (StringUtils.isBlank(value)) {
                    return;
                }
                String[] wordArr = value.split(",");
                int minArrLength = 3;
                if (wordArr.length < minArrLength) {
                    return;
                }
                try {
                    Transaction account = new Transaction();
                    account.setAccountId(Long.parseLong(wordArr[0].trim()));
                    // parseDouble avoids the needless boxing of Double.valueOf.
                    account.setAmount(Double.parseDouble(wordArr[1].trim()));
                    account.setTransactionTime(new Timestamp(Long.parseLong(wordArr[2].trim())));
                    out.collect(account);
                } catch (NumberFormatException ignored) {
                    // Malformed numeric field — drop this record rather than kill the job.
                }
            }
        }).keyBy(new KeySelector<Transaction, Long>() {
            @Override
            public Long getKey(Transaction value) throws Exception {
                return value.getAccountId();
            }
        });
        keyedStream.print("report --> ");

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        Table table = tableEnv.fromDataStream(keyedStream);
        tableEnv.createTemporaryView("transactions", table);

        // Project the stream into the report shape. MyFloor bucketizes the raw
        // transaction timestamp into the report's log_ts column.
        Table selectTable = table.select(
                $("accountId").as("account_id"),
                call(MyFloor.class, $("transactionTime")).as("log_ts"),
                $("amount").as("amount"));

        /*
         * Key point: a plain GROUP BY produces an UPDATING result, so the sink must
         * support writing updates. Kafka and filesystem sinks are append-only and
         * would reject such a query; an upsert-capable sink (e.g. JDBC/MySQL) can
         * accept it. To aggregate into an append-only sink, use group windows:
         * https://nightlies.apache.org/flink/flink-docs-release-1.12/dev/table/sql/queries.html#group-windows
         */
        tableEnv.executeSql("CREATE TABLE spend_report (\n" +
                "    account_id BIGINT,\n" +
                "    log_ts     TIMESTAMP(9),\n" +
                "    amount     DOUBLE\n" +
                ") WITH (\n" +
                "   'connector'  = 'filesystem',\n" +
                // Was 'file://data/out' — a malformed URI where "data" parses as the
                // host; a local absolute path needs the triple-slash form.
                "   'path'       = 'file:///data/out',\n" +
                "   'format' = 'json'\n" +
                ")");

        TableResult tableResult = selectTable.executeInsert("spend_report");
        tableResult.print();

        // Runs the DataStream part of the graph (the debug print above);
        // executeInsert() has already submitted the Table pipeline as its own job.
        env.execute();
    }
}
