package com.nepu.gmall.realtime.app.dwd;

import com.nepu.gmall.realtime.util.KafkaUtils;
import com.nepu.gmall.realtime.util.MysqlUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

import java.time.Duration;

/**
 * 本类处理的是交易域支付成功事务型事实表
 *  业务的逻辑是：
 *      （1）首先从topic_db中读取数据
 *      （2）过滤出table='payment_info'的数据，并且我们的过滤条件是`type`='update' and payment_status='1602'
 *      （3）从订单详情表中读取订单的数据
 *      （4）读取字典表
 *      （5）将三表关联，得到支付成功的订单数据
 *      （6）数据写出到kafka
 *      
 * 数据的流向
 *  mock --> maxwell --> kafka --> topic_db
 *  mock --> maxwell --> kafka --> topic_db --> DwdTradeOrderPreProcess --> DwdTradeOrderDetail --> DwdTradePayDetailSuc --> dwd_trade_pay_detail_suc
 *  base_dic
 *  
 * @author chenshuaijun
 * @create 2023-02-27 17:18
 */
public class DwdTradePayDetailSuc {

    public static void main(String[] args) {
        // TODO 1. Create the stream execution environment and its Table API wrapper.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // The job performs regular (unbounded) joins, so join state must not grow forever:
        // idle state is dropped after 905 seconds (presumably 15 min upstream retention
        // plus a small safety margin — confirm against the upstream jobs).
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(905));
        // Checkpointing is disabled for local testing; re-enable the block below in production.
        /*
        // Checkpoint every 5 minutes with exactly-once semantics.
        env.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE);
        // Discard any checkpoint attempt that takes longer than 10 minutes.
        env.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);
        // Retain externalized checkpoint metadata on cancellation so a failed/cancelled
        // job can still be restored from its last checkpoint.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Failure-rate restart strategy: at most 10 failures per day, 3 minutes between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.of(1L, TimeUnit.DAYS), Time.of(3L, TimeUnit.MINUTES)));
        // At least 3 seconds must elapse between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
        // Heap (in-memory) state backend; checkpoints themselves are persisted to HDFS below.
        env.setStateBackend(new HashMapStateBackend());
        // Checkpoint storage location on HDFS.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/checkpoint");
        // The HDFS checkpoint directory is only writable by the atguigu user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        */
        // TODO 2. Register topic_db (the Maxwell CDC Kafka topic) as a source table.
        // NOTE(review): the DDL is built by MysqlUtil although topic_db is a Kafka topic —
        // confirm getTopicDBLookUpDDL really emits a Kafka connector DDL with a proc_time column.
        tableEnv.executeSql(MysqlUtil.getTopicDBLookUpDDL("DwdTradePayDetailSuc"));
        // TODO 3. Keep only successful payments: update rows of payment_info whose
        // payment_status became '1602' (payment succeeded).
        Table filterTable = tableEnv.sqlQuery("" +
                "select " +
                "    `data`['id'] id, " +
                "    `data`['out_trade_no'] out_trade_no, " +
                "    `data`['order_id'] order_id, " +
                "    `data`['user_id'] user_id, " +
                "    `data`['payment_type'] payment_type, " +
                "    `data`['trade_no'] trade_no, " +
                "    `data`['total_amount'] total_amount, " +
                "    `data`['subject'] subject, " +
                "    `data`['payment_status'] payment_status, " +
                "    `data`['create_time'] create_time, " +
                "    `data`['callback_time'] callback_time, " +
                "    `data`['callback_content'] callback_content, " +
                "    `proc_time` " +
                "from topic_db " +
                "where `table`='payment_info' and `type`='update' and `data`['payment_status'] = '1602'");
        tableEnv.createTemporaryView("payment_info", filterTable);
        // (A leftover debug print via the deprecated toAppendStream API was removed here;
        // it was dead code anyway because env.execute() is never called in this job —
        // the executeSql(insert ...) below submits the actual pipeline.)

        // TODO 4. Register the dwd order-detail Kafka topic produced by DwdTradeOrderDetail.
        tableEnv.executeSql("" +
                "create table dwd_trade_order_detail( " +
                "    id string, " +
                "    order_id string, " +
                "    user_id string, " +
                "    sku_id string, " +
                "    sku_name string, " +
                "    sku_num string, " +
                "    order_price string, " +
                "    province_id string, " +
                "    activity_id string, " +
                "    activity_rule_id string, " +
                "    coupon_id string, " +
                "    create_time string, " +
                "    source_id string, " +
                "    source_type_id string, " +
                "    source_type_name string, " +
                "    split_activity_amount string, " +
                "    split_coupon_amount string, " +
                "    split_total_amount string, " +
                "    row_op_ts TIMESTAMP_LTZ(3) " +
                ")" + KafkaUtils.getKafkaDDL("dwd_trade_order_detail", "DwdTradePayDetailSuc"));
        // TODO 5. Register the MySQL base_dic dictionary as a lookup table.
        tableEnv.executeSql(MysqlUtil.getBaseDicLookUpDDL());
        // TODO 6. Join the three tables: payments x order details, plus a temporal
        // lookup join on base_dic to resolve the payment-type name.
        // NOTE: the payment table alias was renamed from `pi` to `pay` because PI is a
        // reserved keyword (the built-in PI function) in Flink SQL / Calcite.
        Table resultTable = tableEnv.sqlQuery("" +
                "select " +
                "    od.id order_detail_id, " +
                "    od.order_id, " +
                "    od.user_id, " +
                "    od.sku_id, " +
                "    od.sku_name, " +
                "    od.province_id, " +
                "    od.activity_id, " +
                "    od.activity_rule_id, " +
                "    od.coupon_id, " +
                "    pay.payment_type payment_type_code, " +
                "    dic.dic_name payment_type_name, " +
                "    pay.callback_time, " +
                "    od.source_id, " +
                "    od.source_type_id, " +
                "    od.source_type_name, " +
                "    od.sku_num, " +
                "    od.split_activity_amount, " +
                "    od.split_coupon_amount, " +
                "    od.split_total_amount split_payment_amount, " +
                "    od.row_op_ts " +
                "from payment_info pay join dwd_trade_order_detail od on pay.order_id=od.order_id " +
                "join base_dic for system_time as of pay.proc_time dic on pay.payment_type=dic.dic_code");

        tableEnv.createTemporaryView("result_table", resultTable);
        // TODO 7. Sink the joined rows to Kafka. Upsert-kafka with order_detail_id as the
        // primary key deduplicates retractions produced by the unbounded join above.
        tableEnv.executeSql("" +
                "create table dwd_trade_pay_detail_suc( " +
                "    order_detail_id string, " +
                "    order_id string, " +
                "    user_id string, " +
                "    sku_id string, " +
                "    sku_name string, " +
                "    province_id string, " +
                "    activity_id string, " +
                "    activity_rule_id string, " +
                "    coupon_id string, " +
                "    payment_type_code string, " +
                "    payment_type_name string, " +
                "    callback_time string, " +
                "    source_id string, " +
                "    source_type_code string, " +
                "    source_type_name string, " +
                "    sku_num string, " +
                "    split_activity_amount string, " +
                "    split_coupon_amount string, " +
                "    split_payment_amount string, " +
                "    row_op_ts TIMESTAMP_LTZ(3), " +
                "    primary key(order_detail_id) not enforced " +
                ")" + KafkaUtils.getUpsertKafkaDDL("dwd_trade_pay_detail_suc"));

        // Columns map positionally (result_table's source_type_id feeds source_type_code).
        tableEnv.executeSql("insert into dwd_trade_pay_detail_suc select * from result_table");
    }
}
