package com.atguigu.edu.realtime.app.dwd;

import com.atguigu.edu.realtime.common.Constant;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;


/**
 * DWD-layer job: builds the trade order-detail wide table.
 *
 * <p>Reads the Maxwell CDC stream from the Kafka topic {@code ods_db}, filters out
 * {@code order_info} and {@code order_detail} rows, joins them with the page-log
 * stream ({@code dwd_traffic_page}) to attach source/session info, and writes the
 * result to the Kafka topic {@code dwd_trade_order_detail} via upsert-kafka.
 */
public class Dwd_TradeOrderDetail {

    // Checkpoint-directory / job name.
    // Fix: the original value was "Dwd_TrafficLogAppVideoDetail" — a copy/paste
    // leftover from another job. It is now consistent with this class and with the
    // Kafka group.id 'Dwd_TradeOrderDetail' used in the SQL below; it also keys the
    // HDFS checkpoint path, so the old value would have mixed state across jobs.
    public static final String ckAndAndJobName = "Dwd_TradeOrderDetail";
    // NOTE(review): never read anywhere in this job — presumably a leftover from the
    // traffic-log job this file was copied from; confirm before deleting (kept so any
    // external reader of this public constant still compiles).
    public static final String source_topic = Constant.TOPIC_DWD_TRAFFIC_APPVIDEO;
    // Local Flink web-UI (REST) port for this job.
    public static final Integer port = 3005;
    // TODO: could later be passed in as a program argument instead of hard-coded.
    public static final Integer parallelism = 2;

    // TODO: deliberately not factored into a shared base environment for practice —
    // writing the boilerplate a few times to get familiar with the API.
    public static void main(String[] args) {
        /*
            1. Set the Hadoop user so checkpoints can be written to HDFS.
         */
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        /*
            2. Create the stream execution environment.
         */
        Configuration configuration = new Configuration();
        // Fix: the original used the key "port", which Flink does not recognize
        // (the setting was silently ignored); the REST / web-UI port key is "rest.port".
        configuration.setInteger("rest.port", port);
        // Any Flink option can be set through Configuration; this one names the job.
        configuration.setString("pipeline.name", ckAndAndJobName);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);

        env.setParallelism(parallelism); // rule of thumb: match the Kafka topic's partition count

        // Enable exactly-once checkpointing.
        // 1) state backend
        env.setStateBackend(new HashMapStateBackend());
        // 2) enable checkpointing with a 3 s interval
        env.enableCheckpointing(3000);
        // 3) exactly-once checkpoint semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // 4) checkpoint storage directory (keyed by the job name)
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/edu/" + ckAndAndJobName);
        // 5) checkpoint timeout: 60 s
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        // 6) max concurrent checkpoints
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // 7) minimum pause between checkpoints — (6) and (7) both prevent a slow
        //    checkpoint from overlapping with the next one
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // 8) keep externalized checkpoints when the job is cancelled
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        /*
            3. Create the stream table environment.
         */
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        // Name the SQL application as well.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", ckAndAndJobName);
        // The order_detail/order_info join keeps its state forever by default, which
        // would eventually OOM — bound it with a TTL.
        // NOTE(review): 5 s is very tight; confirm it covers the arrival skew between
        // the two CDC streams, otherwise join matches will be silently dropped.
        tEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(5));

        /*
             4. Read the ods_db topic from Kafka into a dynamic table (DDL).

                create table ods_db (
                `database` string,      // ods_db may carry rows from several databases/tables
                `table` string,         // ods_db may carry rows from several databases/tables
                `type`  string,
                `ts`    string,
                `data`  map<string, string>,
                `old`   map<string, string>,
                `proc_time` as proctime()   // processing time, needed for lookup joins
                ) with (
                    'connector' = 'kafka',
                    'properties.bootstrap.servers' = 'hadoop162:9092, hadoop163:9092, hadoop164:9092',
                    'properties.group.id' = 'Dwd_TradeOrderDetail',
                    'topic' = 'ods_db',
                    'scan.startup.mode' = 'latest-offset',
                    'json.ignore-parse-errors' = 'true',
                    'format' = 'json'
                )
         */
        tEnv.executeSql(
                "                create table ods_db ( " +
                        "                `database` string, " +
                        "                `table` string, " +
                        "                `type`  string, " +
                        "                `ts`    string, " +
                        "                `data`  map<string, string>, " +
                        "                `old`   map<string, string>, " +
                        "                `proc_time` as proctime()    " + // processing time, needed for lookup joins
                        "                ) with ( " +
                        "                    'connector' = 'kafka', " +
                        "                    'properties.bootstrap.servers' = 'hadoop162:9092, hadoop163:9092, hadoop164:9092', " +
                        "                    'properties.group.id' = 'Dwd_TradeOrderDetail', " +
                        "                    'topic' = 'ods_db', " +
                        "                    'scan.startup.mode' = 'latest-offset', " +
                        // NOTE(review): the original commented-out key 'json.ignore-parse-error'
                        // is misspelled — the real option is 'json.ignore-parse-errors' (plural);
                        // re-enable it with the correct key if malformed records should be skipped.
//                        "                    'json.ignore-parse-errors' = 'true', " +
                        "                    'format' = 'json' " +
                        "                )"
        );

        /* 5. Select order rows from ods_db:
            filter the rows belonging to the order_info table out of the mixed CDC stream.
            select
                data['id'] id,
                data['final_amount'] final_amount,
                data['session_id'] session_id,
                data['province_id'] province_id,
                ts ts
            from ods_db
            where `database` = 'edu'
                and `table` = 'order_info'
                and (`type` = 'insert' or `type` = 'update')
         */
        Table orderInfo = tEnv.sqlQuery(
            "            select  " +
                    "                data['id'] id,  " +
                    "                data['final_amount'] final_amount,  " +
                    "                data['session_id'] session_id,  " +
                    "                data['province_id'] province_id,  " +
                    "                ts ts  " +
                    "            from ods_db   " +
                    "            where `database` = 'edu'   " +
                    "                and `table` = 'order_info'  " +
                    "                and (`type` = 'insert' or `type` = 'update')"
        );
        tEnv.createTemporaryView("order_info", orderInfo);

        /* 6. Select order-detail rows from ods_db.
            select
                data['id'] id,
                data['course_id'] course_id,
                data['course_name'] course_name,
                data['order_id'] order_id,
                data['user_id'] user_id,
                ts ts,
                proc_time
            from ods_db
            where `database` = 'edu'
                and `table` = 'order_detail'
                and `type` = 'insert'
         */
        Table orderDetail = tEnv.sqlQuery(
                "            select  " +
                        "                data['id'] id,  " +
                        "                data['course_id'] course_id,  " +
                        "                data['course_name'] course_name,  " +
                        "                data['order_id'] order_id,  " +
                        "                data['user_id'] user_id,  " +
                        "                ts ts,  " +
                        "                proc_time  " +
                        "            from ods_db   " +
                        "            where `database` = 'edu'   " +
                        "                and `table` = 'order_detail'  " +
                        "                and `type` = 'insert'"
        );
        tEnv.createTemporaryView("order_detail", orderDetail);


        /* 7. Read the page-log topic from Kafka (for the traffic source id) into a dynamic table.
            create table dwd_traffic_page (
                `common` map<string, string>,
                `page` map<string, string>,
                `ts` bigint
            ) with (
                'connector' = 'kafka',
                'properties.bootstrap.servers' = 'hadoop162:9092, hadoop163:9092, hadoop164:9092',
                'properties.group.id' = 'Dwd_TradeOrderDetail',
                'topic' = 'dwd_traffic_page',
                'scan.startup.mode' = 'latest-offset',
                'json.ignore-parse-errors' = 'true',
                'format' = 'json'
            )
         */
        tEnv.executeSql(
                "            create table dwd_traffic_page (  " +
                        "                `common` map<string, string>,  " +
                        "                `page` map<string, string>,  " +
                        "                `ts` bigint  " +
                        "            ) with (  " +
                        "                'connector' = 'kafka',  " +
                        "                'properties.bootstrap.servers' = 'hadoop162:9092, hadoop163:9092, hadoop164:9092',  " +
                        "                'properties.group.id' = 'Dwd_TradeOrderDetail',  " +
                        "                'topic' = 'dwd_traffic_page',  " +
                        "                'scan.startup.mode' = 'latest-offset',  " +
                        // NOTE(review): correct spelling is 'json.ignore-parse-errors' (plural);
                        // re-enable with that key if malformed records should be skipped.
//                        "                'json.ignore-parse-errors' = 'true',  " +
                        "                'format' = 'json'  " +
                        "            )"
        );
        /* Project the fields we need from dwd_traffic_page:
            select
                common['sc'] sc,
                common['sid'] sid,
                ts ts
            from dwd_traffic_page
         */
        Table trafficPage = tEnv.sqlQuery(
                "            select  " +
                        "                common['sc'] sc,  " +
                        "                common['sid'] sid,  " +
                        "                ts ts  " +
                        "            from dwd_traffic_page"
        );
        tEnv.createTemporaryView("traffic_log_page", trafficPage);
//        tEnv.executeSql("select * from traffic_log_page").print();


        /*
            8. Join order_info, order_detail and the page log.
            select
                 od.id,
                 od.order_id,
                 od.user_id,
                 od.course_id,
                 od.course_name,
                 tlp.sc source_id,
                 tlp.sid session_id,
                 oi.province_id,
                 oi.final_amount,
                 od.ts,
                 od.proc_time
            from order_detail od
            left join order_info oi on od.order_id = oi.id
            left join traffic_log_page tlp on oi.session_id = tlp.sid
         */
        Table resultTable = tEnv.sqlQuery(
                "            select  " +
                        "                 od.id,  " +
                        "                 od.order_id,  " +
                        "                 od.user_id,  " +
                        "                 od.course_id,  " +
                        "                 od.course_name,  " +
                        "                 tlp.sc source_id,  " +
                        "                 tlp.sid session_id,  " +
                        "                 oi.province_id,  " +
                        "                 oi.final_amount,  " +
                        "                 od.ts,  " +
                        "                 od.proc_time  " +
                        "            from order_detail od  " +
                        "            left join order_info oi on od.order_id = oi.id  " +
                        "            left join traffic_log_page tlp on oi.session_id = tlp.sid"
        );
        tEnv.createTemporaryView("result_table", resultTable);
//        tEnv.executeSql("select * from result_table").print();



        /* 9. Create the sink table and write to Kafka.
            Because of the left joins, rows are retracted/updated as the right sides
            arrive, so a plain kafka sink won't do — upsert-kafka is required.
            upsert-kafka needs a primary key; since Flink itself stores no data it
            cannot enforce uniqueness/non-null on it, hence "primary key(id) not enforced".

            create table dwd_trade_order_detail(
                id string,
                order_id string,
                user_id string,
                course_id string,
                course_name string,
                source_id string,
                session_id string,
                province_id string,
                final_amount string,
                ts string,
                proc_time timestamp,
                primary key(id) not enforced
            ) with(
                'connector' = 'upsert-kafka',
                'properties.bootstrap.servers' = 'hadoop162:9092, hadoop163:9092, hadoop164:9092',
                'topic' = 'dwd_trade_order_detail',
                'key.format' = 'json',
                'value.format' = 'json'
            )
         */
        tEnv.executeSql(
            "            create table dwd_trade_order_detail(  " +
                    "                id string,  " +
                    "                order_id string,  " +
                    "                user_id string,  " +
                    "                course_id string,  " +
                    "                course_name string,  " +
                    "                source_id string,  " +
                    "                session_id string,  " +
                    "                province_id string,  " +
                    "                final_amount string,  " +
                    "                ts string,  " +
                    "                proc_time timestamp,  " +
                    "                primary key(id) not enforced  " +
                    "            ) with(  " +
                    "                'connector' = 'upsert-kafka',  " +
                    "                'properties.bootstrap.servers' = 'hadoop162:9092, hadoop163:9092, hadoop164:9092',  " +
                    "                'topic' = 'dwd_trade_order_detail',  " +
                    "                'key.format' = 'json',  " +
                    "                'value.format' = 'json'  " +
                    "            )"
        );
        // executeInsert submits the job; no explicit env.execute() is needed here.
        resultTable.executeInsert("dwd_trade_order_detail");

    }



}
