package com.zhu.app.dwd;

import com.zhu.utils.MySqlUtil;
import com.zhu.utils.ZhuKafkaUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.hadoop.yarn.webapp.example.MyApp;

import java.time.ZoneId;

/**
 * DWD layer: trade order-refund fact app.
 *
 * <p>When a user performs a refund, {@code order_refund_info} receives several inserted rows and
 * the matching {@code order_info} row is updated at the same time. Since there is no
 * business-level time lag between the two tables, only possible out-of-order arrival has to be
 * covered, so the join state TTL is set to 5 s.
 */
public class DwdTradeOrderRefundApp {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1); // NOTE(review): kafka topic has 4 partitions — confirm parallelism choice

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Only out-of-order arrival needs to be bridged between the two tables (see class doc),
        // so a short state TTL is sufficient.
        Configuration configuration = tableEnv.getConfig().getConfiguration();
        configuration.setString("table.exec.state.ttl", "5 s");

        // Checkpointing / state backend — intentionally disabled (e.g. for local runs).
        /*
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage(ClusterParametersConfig.HDFS_CHECKPOINT_FILE_DIR);  // checkpoints on HDFS
        System.setProperty("HADOOP_USER_NAME", "zhu");
        env.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);                                       // checkpoint timeout
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);                                          // max concurrent checkpoints
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5 * 1000L));                         // restart strategy
        */

        // Source: ODS topic_db (CDC-style change log), consumed with this app's consumer group.
        tableEnv.executeSql(ZhuKafkaUtil.getTopicDB("dwd_trade_order_refund"));

        // Refund detail rows: only freshly inserted order_refund_info records.
        Table orderRefundInfo = tableEnv.sqlQuery("select " +
                "data['id'] id, " +
                "data['user_id'] user_id, " +
                "data['order_id'] order_id, " +
                "data['sku_id'] sku_id, " +
                "data['refund_type'] refund_type, " +
                "data['refund_num'] refund_num, " +
                "data['refund_amount'] refund_amount, " +
                "data['refund_reason_type'] refund_reason_type, " +
                "data['refund_reason_txt'] refund_reason_txt, " +
                "data['create_time'] create_time, " +
                "pt " +
                "from topic_db " +
                "where `table` = 'order_refund_info' " +
                "and `type` = 'insert' "
        );
        tableEnv.createTemporaryView("order_refund_info", orderRefundInfo);

        // Dimension: MySQL base_dic as a lookup (temporal) table.
        tableEnv.executeSql(MySqlUtil.getBaseDicLooKupDDL());

        // Order rows that just transitioned into refund state (order_status 1005).
        // `old`['order_status'] is not null guarantees the status field actually changed.
        Table orderInfoRefund = tableEnv.sqlQuery(
                "select " +
                        "data['id'] id, " +
                        "data['province_id'] province_id, " +
                        "`old` " +
                        "from topic_db " +
                        // BUGFIX: trailing space added — without it the concatenated SQL read
                        // "... = 'order_info'and `type` ..." which is not a valid token boundary.
                        "where `table` = 'order_info' " +
                        "and `type` = 'update' " +
                        "and data['order_status'] = '1005' " +
                        "and `old`['order_status'] is not null"
        );
        tableEnv.createTemporaryView("order_info_refund", orderInfoRefund);

        // Join refund details with the refunded order and the dictionary (lookup join on pt).
        // BUGFIX: the two dic_name projections are aliased; without aliases the result table has
        // two columns both named dic_name, which Flink rejects when registering the view. The
        // aliases also line up with the sink columns refund_type_name / refund_reason_type_name.
        Table resultTable = tableEnv.sqlQuery(
                "select " +
                        "ri.id,\n" +
                        "ri.user_id,\n" +
                        "ri.order_id,\n" +
                        "ri.sku_id,\n" +
                        "oi.province_id,\n" +
                        "date_format(ri.create_time,'yyyy-MM-dd') date_id,\n" +
                        "ri.create_time,\n" +
                        "ri.refund_type,\n" +
                        "type_dic.dic_name refund_type_name,\n" +
                        "ri.refund_reason_type,\n" +
                        "reason_dic.dic_name refund_reason_type_name,\n" +
                        "ri.refund_reason_txt,\n" +
                        "ri.refund_num,\n" +
                        "ri.refund_amount,\n" +
                        "current_row_timestamp() row_op_ts " +
                        "from order_refund_info ri " +
                        "join " +
                        "order_info_refund oi on " +
                        "ri.order_id = oi.id " +
                        "join " +
                        "base_dic for system_time as of ri.pt as type_dic " +
                        "on ri.refund_type = type_dic.dic_code " +
                        "join " +
                        "base_dic for system_time as of ri.pt as reason_dic " +
                        "on ri.refund_reason_type = reason_dic.dic_code"
        );
        tableEnv.createTemporaryView("result_table", resultTable);

        // Sink: Kafka table for the DWD refund fact. Columns are matched by position by the
        // "insert into ... select *" below, so their order must mirror result_table.
        tableEnv.executeSql("create table dwd_trade_order_refund(\n" +
                        "id string,\n" +
                        "user_id string,\n" +
                        "order_id string,\n" +
                        "sku_id string,\n" +
                        "province_id string,\n" +
                        "date_id string,\n" +
                        "create_time string,\n" +
                        "refund_type_code string,\n" +
                        "refund_type_name string,\n" +
                        "refund_reason_type_code string,\n" +
                        "refund_reason_type_name string,\n" +
                        "refund_reason_txt string,\n" +
                        "refund_num string,\n" +
                        "refund_amount string,\n" +
                        "row_op_ts timestamp_ltz(3)\n" +
                        ")" +
                        ZhuKafkaUtil.getKafkaSinkDDL("dwd_trade_order_refund"));

        // executeSql on an INSERT submits the streaming job itself.
        // BUGFIX: the trailing streamExecutionEnvironment.execute() was removed — this pipeline is
        // pure Table API, so there are no DataStream operators and execute() would throw
        // "No operators defined in streaming topology".
        tableEnv.executeSql("insert into dwd_trade_order_refund select * from result_table");
    }
}
