package com.zhu.app.dwd;

import com.zhu.utils.MySqlUtil;
import com.zhu.utils.ZhuKafkaUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.ZoneId;

/**
 * DWD-layer trade cart-add fact table. The source-type dimension is degenerate:
 * the dimension data is small and changes rarely, so it is joined in directly.
 *
 * Flink SQL Lookup Join is used when joining a Flink table against results
 * queried from an external system. It requires the main (probe) table to carry
 * a processing-time attribute, while the dimension table is produced by a
 * Lookup connector. Because dimension data is time-sensitive, the
 * processing-time attribute pins each lookup to a point in time.
 *
 * The Kafka connector provides the tables backed by Kafka topics.
 */
public class DWDTradeCartAddApp {

    public static void main(String[] args) throws Exception {

        // TODO 1. Stream environment & table environment
        StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
        streamExecutionEnvironment.setParallelism(4);    // matches the 4 partitions of the Kafka topic
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(streamExecutionEnvironment);

        // Session time zone so processing-time values line up with local (GMT+8) timestamps.
        tableEnvironment.getConfig().setLocalTimeZone(ZoneId.of("GMT+8"));
        Configuration configuration = tableEnvironment.getConfig().getConfiguration();
        // Expire idle join/aggregation state after 5 seconds to bound state size.
        configuration.setString("table.exec.state.ttl", "5 s");

        // Checkpointing (disabled while testing locally)
        /*
        streamExecutionEnvironment.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE); // exactly-once
        // State backend
        streamExecutionEnvironment.setStateBackend(new HashMapStateBackend());
        streamExecutionEnvironment.getCheckpointConfig().setCheckpointStorage(ClusterParametersConfig.HDFS_CHECKPOINT_FILE_DIR);  // store checkpoints on HDFS
        System.setProperty("HADOOP_USER_NAME", "zhu");
        streamExecutionEnvironment.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);  // checkpoint timeout
        streamExecutionEnvironment.getCheckpointConfig().setMaxConcurrentCheckpoints(2);  // max concurrent checkpoints
        streamExecutionEnvironment.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5 * 1000L));  // restart strategy
         */

        // TODO 3. Register the ODS topic_db table from Kafka
        tableEnvironment.executeSql(ZhuKafkaUtil.getTopicDB("cart_add_zhu_2023"));

        // cart_info facts: an insert contributes its sku_num as-is; an update is only a
        // cart-add when the new sku_num exceeds the old one, in which case the added
        // quantity is the delta (new - old).
        Table cartAdd = tableEnvironment.sqlQuery(
                "select data['id'] id," +
                        "data['user_id'] user_id," +
                        "data['sku_id'] sku_id, " +
                        "data['cart_price'] cart_price, " +
                        "data['sku_name'] sku_name, " +
                        "data['is_checked'] is_checked, " +
                        "data['create_time'] create_time, " +
                        "data['operate_time'] operate_time, " +
                        "data['is_ordered'] is_ordered, " +
                        "data['order_time'] order_time, " +
                        "data['source_id'] source_id, " +
                        "data['source_type'] source_type," +
                        "if(`type`='insert',`data`['sku_num']," +
                        "cast(" +
                        "( cast(`data`['sku_num'] as int) - cast(`old`['sku_num'] as int) ) as string" +
                        ") " +
                        ") sku_num, " +
                        "pt " +
                        "from topic_db " +
                        "where `database` = 'flink' and `table` = 'cart_info' " +
                        "and ( " +
                        "`type` = 'insert' " +
                        "or " +
                        "( " +
                        "`type` = 'update' and `old`['sku_num'] is not null and cast(data['sku_num'] as int)  > cast(`old`['sku_num'] as int) " +
                        "))");
        // Convert the table to a stream for debugging:
        // tableEnvironment.toAppendStream(cartAdd, Row.class).print();

        tableEnvironment.createTemporaryView("cart_add", cartAdd);
        // Register the MySQL base_dic lookup (dimension) table, with lookup caching.
        tableEnvironment.executeSql(MySqlUtil.getBaseDicLooKupDDL());

        // Lookup join: processing-time temporal join against the dictionary dimension.
        // FIX: a space is required after `dic` — the original concatenation produced
        // "`dic`on ...", which is a SQL syntax error.
        Table lookUpJoinCartAddTable = tableEnvironment.sqlQuery(
                "select " +
                        "id," +
                        "user_id, " +
                        "sku_id, " +
                        "source_id, " +
                        "source_type as source_type_code, " +
                        "dic_name as source_type_name, " +
                        "ca.create_time as create_time, " +
                        "sku_num " +
                        "from cart_add ca " +
                        "join base_dic for system_time as of ca.pt `dic` " +
                        "on ca.source_type=dic.dic_code"
        );
        tableEnvironment.createTemporaryView("lookup_cartAdd_table", lookUpJoinCartAddTable);

        // TODO write to the Kafka sink topic.
        // FIX: the original DDL declared create_time twice (9 columns), which both breaks
        // the DDL and mismatches the 8-column select used by the positional insert below.
        tableEnvironment.executeSql("" +
                "create table dwd_trade_cart_add(" +
                "id string," +
                "user_id string," +
                "sku_id string," +
                "source_id string," +
                "source_type string," +
                "source_type_name string," +
                "create_time string, " +
                "sku_num string " +
                ")" +
                ZhuKafkaUtil.getKafkaSinkDDL("dwd_trade_cart_add"));

        // executeSql submits the streaming insert job asynchronously.
        // FIX: removed .print() — on an unbounded INSERT job it blocks forever waiting
        // for the job to finish.
        tableEnvironment.executeSql(
                "insert into dwd_trade_cart_add " +
                        "select * from lookup_cartAdd_table"
        );

        // NOTE: streamExecutionEnvironment.execute() removed — no DataStream operators are
        // defined here, so calling it would throw "No operators defined in streaming
        // topology"; the executeSql above already submits the job.
    }
}
