package com.nepu.gmall.realtime.app.dwd;

import com.nepu.gmall.realtime.util.KafkaUtils;
import com.nepu.gmall.realtime.util.MysqlUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * Trade domain, DWD layer: refund transactional fact table.
 *
 * Processing steps:
 *   (1) read change-log records from topic_db;
 *   (2) filter out inserts into the order_refund_info table;
 *   (3) pick up the matching order_info rows whose status flipped to 1005 (refunded);
 *   (4) load the base_dic dictionary as a lookup table;
 *   (5) join the three tables (two lookup joins against base_dic);
 *   (6) write the joined result out to Kafka.
 *
 * Only topic_db is consumed here, so Kafka and ZooKeeper are the only
 * services that need to be running.
 *
 * @author chenshuaijun
 * @create 2023-02-27 19:58
 */
public class DwdTradeOrderRefund {

    public static void main(String[] args) {

        // TODO 1. Build the streaming environment and its Table API wrapper.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // Expire join state that has been idle for 5 seconds so the regular
        // joins below do not accumulate unbounded state.
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(5));
        // Checkpoint configuration — kept disabled for local runs.
        // Interval 5 min, exactly-once semantics:
        /*env.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE);
        // Checkpoint timeout: 10 minutes.
        env.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);
        // Externalized checkpoints: persist checkpoint metadata so a failed job
        // can be restored from the retained checkpoint.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Restart strategy for checkpoint failures.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.of(1L, TimeUnit.DAYS), Time.of(3L, TimeUnit.MINUTES)));
        // Minimum pause between two consecutive checkpoints.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
        // In-memory (hash map) state backend.
        env.setStateBackend(new HashMapStateBackend());
        // Checkpoint storage location on HDFS.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/checkpoint");
        // Only the atguigu user may write to this HDFS, so impersonate it.
        System.setProperty("HADOOP_USER_NAME", "atguigu");*/

        // TODO 2. Register the topic_db change-log source.
        tableEnv.executeSql(MysqlUtil.getTopicDBLookUpDDL("DwdTradeOrderRefund"));

        // TODO 3. Keep only inserts into the refund table.
        Table refundRows = tableEnv.sqlQuery("" +
                "select " +
                "    `data`['id'] id, " +
                "    `data`['user_id'] user_id, " +
                "    `data`['order_id'] order_id, " +
                "    `data`['sku_id'] sku_id, " +
                "    `data`['refund_type'] refund_type, " +
                "    `data`['refund_num'] refund_num, " +
                "    `data`['refund_amount'] refund_amount, " +
                "    `data`['refund_reason_type'] refund_reason_type, " +
                "    `data`['refund_reason_txt'] refund_reason_txt, " +
                "    `data`['refund_status'] refund_status, " +
                "    `data`['create_time'] create_time, " +
                "    proc_time " +
                "from topic_db " +
                "where `table`='order_refund_info' and `type`='insert'");
        tableEnv.createTemporaryView("order_refund_info", refundRows);

        // TODO 4. Keep order_info updates whose status changed to 1005 (refunded).
        Table refundedOrders = tableEnv.sqlQuery("select " +
                "data['id'] id, " +
                "data['province_id'] province_id, " +
                "`old` " +
                "from topic_db " +
                "where `table` = 'order_info' " +
                "and `type` = 'update' " +
                "and data['order_status']='1005' " +
                "and `old`['order_status'] is not null");
        tableEnv.createTemporaryView("order_info_refund", refundedOrders);

        // TODO 5. Register the dictionary table for lookup joins.
        tableEnv.executeSql(MysqlUtil.getBaseDicLookUpDDL());

        // TODO 6. Join refunds with their orders, resolving both dictionary codes.
        Table joined = tableEnv.sqlQuery("" +
                " select" +
                "    oi.id, " +
                "    oi.user_id , " +
                "    oi.order_id , " +
                "    oi.sku_id , " +
                "    oi.refund_type , " +
                "    rt_dic.dic_name refund_type_name, " +
                "    oi.refund_num , " +
                "    oi.refund_amount , " +
                "    oi.refund_reason_type , " +
                "    reason_dic.dic_name refund_reason_type_name, " +
                "    oi.refund_reason_txt , " +
                "    oi.refund_status , " +
                "    oi.create_time , " +
                "    oir.province_id, " +
                "    date_format(oi.create_time,'yyyy-MM-dd') date_id " +
                "from order_refund_info oi join order_info_refund oir on oi.order_id=oir.id " +
                "join base_dic for system_time as of oi.proc_time reason_dic on oi.refund_reason_type=reason_dic.dic_code " +
                "join base_dic for system_time as of oi.proc_time rt_dic on oi.refund_type = rt_dic.dic_code");
        tableEnv.createTemporaryView("result_table", joined);

        // TODO 7. Declare the Kafka sink and emit the joined rows.
        tableEnv.executeSql("" +
                "create table dwd_trade_order_refund( " +
                "    id string, " +
                "    user_id string , " +
                "    order_id string , " +
                "    sku_id string , " +
                "    refund_type string , " +
                "    refund_type_name string, " +
                "    refund_num string , " +
                "    refund_amount string , " +
                "    refund_reason_type string , " +
                "    refund_reason_type_name string, " +
                "    refund_reason_txt string , " +
                "    refund_status string , " +
                "    create_time string , " +
                "    province_id string, " +
                "    date_id string " +
                ")" + KafkaUtils.getKafkaSinkDDL("dwd_trade_order_refund"));

        tableEnv.executeSql("insert into dwd_trade_order_refund select * from result_table");
    }
}
