package com.atguigu.gmallrealtime.app.dwd.db;

import com.atguigu.gmallrealtime.common.Constant;
import com.atguigu.gmallrealtime.util.HBaseUtil;
import com.atguigu.gmallrealtime.util.MyKafkaUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.omg.IOP.TAG_ALTERNATE_IIOP_ADDRESS;

import java.time.Duration;

/**
 * @author yhm
 * @create 2023-09-27 14:08
 */
/**
 * DWD-layer Flink SQL job that builds the trade order-refund detail fact stream.
 *
 * <p>Pipeline: read change-log records from the {@code topic_db} Kafka topic;
 * filter {@code order_refund_info} insert rows (refund details) and the matching
 * {@code order_info} updates whose status moved to {@code 1005} (refund in
 * progress); join the two; enrich refund type and refund reason codes via a
 * processing-time lookup join against the HBase {@code base_dic} dictionary;
 * write the result to the {@code Constant.TOPIC_DWD_TRADE_ORDER_REFUND} topic
 * as an upsert-kafka table keyed by refund id.
 *
 * @author yhm
 * @create 2023-09-27 14:08
 */
public class DwdTradeOrderRefund {
    public static void main(String[] args) {
        // TODO 1 Create the Flink stream and table environments.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // The refund-detail insert and the corresponding order-status update arrive
        // close together in the change log, so a short state TTL is enough for the
        // regular join in step 7 and keeps join state from growing without bound.
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(5L));

        // TODO 2 Checkpointing and state backend.
        // NOTE(review): checkpoint configuration (exactly-once checkpoints, timeout,
        // externalized-checkpoint retention, min pause, restart strategy, HashMap
        // state backend with HDFS checkpoint storage, HADOOP_USER_NAME) is disabled
        // for local development; re-enable it before deploying to production.

        // TODO 3 Read the topic_db change-log topic from Kafka.
        String groupId = "dwd_trade_order_refund";
        tableEnv.executeSql(MyKafkaUtil.getTopicDbDDL(groupId));

        // TODO 4 Filter refund-detail rows: inserts into order_refund_info.
        Table orTable = tableEnv.sqlQuery("select \n" +
                "  data['id'] id, \n" +
                "  data['user_id'] user_id, \n" +
                "  data['order_id'] order_id, \n" +
                "  data['sku_id'] sku_id, \n" +
                "  data['refund_type'] refund_type, \n" +
                "  data['refund_num'] refund_num, \n" +
                "  data['refund_amount'] refund_amount, \n" +
                "  data['refund_reason_type'] refund_reason_type, \n" +
                "  data['refund_reason_txt'] refund_reason_txt, \n" +
                "  data['refund_status'] refund_status, \n" +
                "  data['create_time'] create_time, \n" +
                "  ts,\n" +
                "  proc_time\n" +
                "from topic_db\n" +
                "where `table`='order_refund_info'\n" +
                "and `type`='insert'");

        tableEnv.createTemporaryView("or_table",orTable);

        // TODO 5 Filter order_info updates whose order_status changed to 1005
        // (refund requested); the `old` check ensures the status actually changed.
        Table oiTable = tableEnv.sqlQuery("select\n" +
                "  data['id'] id,\n" +
                "  data['province_id'] province_id\n" +
                "from topic_db\n" +
                "where `table`='order_info'\n" +
                "and `type`='update'\n" +
                "and `data`['order_status'] = '1005'\n" +
                "and `old`['order_status'] is not null");

        tableEnv.createTemporaryView("oi",oiTable);

        // TODO 6 Register the HBase base_dic dictionary table (lookup source).
        tableEnv.executeSql(HBaseUtil.getBaseDicDDL());

        // TODO 7 Join refund details with the order-status change rows, then
        // lookup-join base_dic twice (refund type and refund reason) on the
        // refund row's processing time.
        Table resultTable = tableEnv.sqlQuery("select\n" +
                "    or_table.id,\n" +
                "    user_id,\n" +
                "    order_id,\n" +
                "    sku_id,\n" +
                "    refund_type,\n" +
                "    b_type.info.dic_name refund_type_name,\n" +
                "    refund_num,\n" +
                "    refund_amount,\n" +
                "    refund_reason_type,\n" +
                "    b_reason.info.dic_name refund_reason_type_name,\n" +
                "    refund_reason_txt,\n" +
                "    refund_status,\n" +
                "    create_time,\n" +
                "    province_id,\n" +
                "    proc_time\n" +
                "from or_table\n" +
                "join oi \n" +
                "on or_table.order_id=oi.id\n" +
                "left join base_dic FOR SYSTEM_TIME AS OF or_table.proc_time b_type \n" +
                "on or_table.refund_type = b_type.rowkey\n" +
                "left join base_dic FOR SYSTEM_TIME AS OF or_table.proc_time b_reason\n" +
                "on or_table.refund_reason_type = b_reason.rowkey");

        tableEnv.createTemporaryView("result_table",resultTable);

        // TODO 8 Write the enriched stream to Kafka as an upsert table keyed by
        // refund id, so retractions from the joins collapse to the latest row.
        tableEnv.executeSql("create table kafka_sink(\n" +
                "    id STRING,\n" +
                "    user_id STRING,\n" +
                "    order_id STRING,\n" +
                "    sku_id STRING,\n" +
                "    refund_type STRING,\n" +
                "    refund_type_name STRING,\n" +
                "    refund_num STRING,\n" +
                "    refund_amount STRING,\n" +
                "    refund_reason_type STRING,\n" +
                "    refund_reason_type_name STRING,\n" +
                "    refund_reason_txt STRING,\n" +
                "    refund_status STRING,\n" +
                "    create_time STRING,\n" +
                "    province_id STRING,\n" +
                "    proc_time TIMESTAMP(3),\n" +
                "    PRIMARY KEY (id) NOT ENFORCED \n" +
                ")"
         + MyKafkaUtil.getUpsertKafkaDLL(Constant.TOPIC_DWD_TRADE_ORDER_REFUND));

        // executeSql on an INSERT submits the job; no explicit env.execute() needed.
        tableEnv.executeSql("insert into kafka_sink select * from result_table");

    }
}
