package net.bwie.realtime.warehouse.ADS;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

/**
 * @BelongsProject: realtime-project-10zlq
 * @BelongsPackage: net.bwie.realtime.warehouse.ADS
 * @Author: zhangleqing
 * @CreateTime: 2025-09-02  15:07
 * @Description: TODO 退前预警
 * 统计每天的 发货风险订单 物流风险订单
 *
 * 发货风险订单 判定条件
 * 1、即将超时：距承诺发货时间不足 8 小时
 * 2、延迟发货：揽收已超时订单
 * 3、缺货：延迟发货后 72 小时内仍未揽收订单
 * 4、虚假点击发货：点击发货后 24H 无揽收记录
 *
 * 物流风险订单 判定条件
 * 1、揽收-更新异常：揽收后停滞及即将超时订单，揽收后停滞定义为揽收超 24h 且当前仍未有物流有效节点更新。
 * 2、运输-派送异常：运输停滞及即将停滞订单，运输停滞定义为网点/首分拨中心-派件物流信息流转过程中，停滞时间超过正常快递节点流转时间。
 * 3、派送-签收异常：派签停滞及即将超时订单，派签停滞定义为从派送-签收停滞时间超24小时的订单。
 * @Version: 1.0
 */
public class One {

    public static void main(String[] args) {
        // 1. Build the streaming TableEnvironment.
        TableEnvironment tableEnv = getTableEnv();

        // 2. Register the DWS Kafka source table.
        readTable(tableEnv);

        // 3. Aggregate the daily risk counts.
        Table resultTable = handle(tableEnv);

//        resultTable.execute().print();

        // 4. Register the Doris sink table.
        createView(tableEnv);

        // 5. Write the aggregated result to Doris.
        saveToSink(tableEnv, resultTable);
    }

    /**
     * Registers the aggregation result as a temporary view named {@code refund}
     * and streams it into the Doris sink table {@code refund_one}.
     *
     * @param tableEnv    the table environment the sink table was created in
     * @param resultTable the windowed aggregation result produced by {@link #handle}
     */
    private static void saveToSink(TableEnvironment tableEnv, Table resultTable) {
        tableEnv.createTemporaryView("refund", resultTable);
        tableEnv.executeSql(
                "insert into refund_one " +
                        "select * from refund"
        ).print();
    }

    /**
     * Creates the Doris sink table {@code refund_one}
     * (database {@code refund_insights}, keyed on {@code window_start}).
     *
     * <p>Option hints (kept here because Flink SQL only accepts {@code --}
     * line comments; {@code //} lines inside the DDL string would make
     * {@code executeSql} fail with a parse error):
     * <ul>
     *   <li>{@code sink.buffer-flush.max-rows}: flush after 1000 buffered rows
     *       (replacement for the legacy {@code sink.batch.size})</li>
     *   <li>{@code sink.buffer-flush.interval}: flush at least every 10s (ms),
     *       prevents data from piling up (replacement for {@code sink.batch.interval})</li>
     *   <li>{@code sink.max-retries}: retry failed writes 3 times</li>
     *   <li>{@code sink.enable-delete}: allow delete/update for upsert semantics</li>
     *   <li>{@code sink.properties.format}: JSON payload, matching the Kafka source</li>
     *   <li>{@code sink.enable.batch-mode}: batch writes for throughput (optional)</li>
     * </ul>
     *
     * @param tableEnv the table environment to register the sink table in
     */
    private static void createView(TableEnvironment tableEnv) {
        tableEnv.executeSql(
                "CREATE TABLE refund_one (\n" +
                        "    window_start TIMESTAMP(3),\n" +
                        "    window_end TIMESTAMP(3),\n" +
                        "    shipping_risk_count INT,\n" +
                        "    logistics_risk_count INT,\n" +
                        "    PRIMARY KEY (`window_start`) NOT ENFORCED\n" +
                        ") WITH (\n" +
                        "    'connector' = 'doris',\n" +
                        "    'fenodes' = 'node102:8030',\n" +
                        "    'table.identifier' = 'refund_insights.refund_one',\n" +
                        "    'username' = 'root',\n" +
                        "    'password' = '123456',\n" +
                        "    'sink.buffer-flush.max-rows' = '1000',\n" +
                        "    'sink.buffer-flush.interval' = '10000',\n" +
                        "    'sink.max-retries' = '3',\n" +
                        "    'sink.enable-delete' = 'true',\n" +
                        "    'sink.properties.format' = 'json',\n" +
                        "    'sink.enable.batch-mode' = 'true'\n" +
                        ")"
        );
    }

    /**
     * Aggregates per-day risk counts over 1-day tumbling event-time windows
     * keyed on {@code pay_time}.
     *
     * <p>The per-order flags are already computed in the DWS layer, so this
     * query only has to sum them up:
     * <ul>
     *   <li>shipping risks: about-to-timeout (&lt; 8h to promised ship time),
     *       late shipment (pickup &gt; 8h late), stockout (&gt; 72h late),
     *       fake ship click (no pickup within 24h of "shipped")</li>
     *   <li>logistics risks: pickup-update anomaly (&gt; 24h after pickup),
     *       transport-delivery anomaly (&gt; 8h), delivery-sign anomaly (&gt; 24h)</li>
     * </ul>
     *
     * @param tableEnv the table environment the source table was registered in
     * @return a table with columns {@code window_start}, {@code window_end},
     *         {@code shipping_risk_count}, {@code logistics_risk_count}
     */
    private static Table handle(TableEnvironment tableEnv) {
        return tableEnv.sqlQuery(
                "SELECT\n" +
                        "    TUMBLE_START(pay_time, INTERVAL '1' DAY) AS window_start,\n" +
                        "    TUMBLE_END(pay_time, INTERVAL '1' DAY) AS window_end,\n" +
                        "    sum(is_about_to_timeout + is_late_shipment + is_stockout + is_fake_ship_click) as shipping_risk_count,\n" +
                        "    sum(is_collection_update_exception + is_transport_delivery_exception + is_delivery_sign_exception) as logistics_risk_count\n" +
                        "FROM dws_before_retreating\n" +
                        "GROUP BY\n" +
                        "    TUMBLE(pay_time, INTERVAL '1' DAY)"
        );
    }

    /**
     * Registers the DWS source table {@code dws_before_retreating}: a Kafka
     * topic of per-order records carrying the precomputed risk flags, with a
     * 5-second watermark on {@code pay_time} for event-time windowing.
     *
     * @param tableEnv the table environment to register the source table in
     */
    private static void readTable(TableEnvironment tableEnv) {
        tableEnv.executeSql(
                "create table dws_before_retreating(\n" +
                        "    order_id STRING,\n" +
                        "    product_id STRING,\n" +
                        "    pay_time TIMESTAMP(3),\n" +
                        "    promise_ship_time TIMESTAMP(3),\n" +
                        "\n" +
                        "    logistics_start_time TIMESTAMP(3),\n" +
                        "    logistics_end_time TIMESTAMP(3),\n" +
                        "    expected_start_time TIMESTAMP(3),\n" +
                        "    expected_end_time TIMESTAMP(3),\n" +
                        "    logistics_node STRING,\n" +
                        "    is_about_to_timeout INT,\n" +
                        "    is_late_shipment INT,\n" +
                        "    is_stockout INT,\n" +
                        "    is_fake_ship_click INT,\n" +
                        "    is_collection_update_exception INT,\n" +
                        "    is_transport_delivery_exception INT,\n" +
                        "    is_delivery_sign_exception INT,\n" +
                        "    proctime AS PROCTIME(),\n" +
                        "    WATERMARK FOR pay_time AS pay_time - INTERVAL '5' SECOND\n" +
                        ") WITH (\n" +
                        "    'connector' = 'kafka',\n" +
                        "    'topic' = 'dws_before_retreating',\n" +
                        "    'properties.bootstrap.servers' = 'node101:9092',\n" +
                        "    'properties.group.id' = 'dws_before_retreating',\n" +
                        "    'scan.startup.mode' = 'earliest-offset',\n" +
                        "    'format' = 'json'\n" +
                        ")"
        );
    }

    /**
     * Builds a streaming {@link TableEnvironment} configured for this job.
     *
     * @return a streaming table environment with Asia/Shanghai as the local
     *         time zone, default parallelism 1, and a 25-hour state TTL
     */
    public static TableEnvironment getTableEnv() {
        // 1. Environment settings: unbounded streaming mode.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .build();
        TableEnvironment tabEnv = TableEnvironment.create(settings);
        // 2. Job-level configuration.
        Configuration configuration = tabEnv.getConfig().getConfiguration();
        configuration.setString("table.local-time-zone", "Asia/Shanghai");
        configuration.setString("table.exec.resource.default-parallelism", "1");
        // State TTL of 25 hours (90000 s) to safely cover the 24-hour
        // risk-judgment intervals used upstream.
        configuration.setString("table.exec.state.ttl", "90000 s");
//        configuration.setString("execution.checkpointing.interval", "5 s");
        // 3. Return the configured environment.
        return tabEnv;
    }
}
