package net.bwie.realtime.warehouse.ADS;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

/**
 * Refund behavior tracking — metric 01 (ADS layer).
 *
 * <p>Reads the DWD refund-behavior stream from Kafka and, grouped by a
 * one-day tumbling event-time window plus refund scene, computes:
 * refunding-user count, refund sub-order count, order refund rate,
 * refund amount, and amount refund rate. The aggregate is upserted
 * into a Doris table.
 *
 * @author zhangleqing
 * @version 1.0
 */
public class Three_One {

    /**
     * Job entry point: build environment, register source and sink,
     * run the aggregation, and stream the result into Doris.
     */
    public static void main(String[] args) {
        // 1. Create the streaming table environment.
        TableEnvironment tableEnv = getTableEnv();

        // 2. Register the Kafka source table (dwd_retreat_behavior).
        createSourceTable(tableEnv);

        // 3. Run the windowed aggregation.
        Table resultTable = handle(tableEnv);

        // For local debugging only:
        // resultTable.execute().print();

        // 4. Register the Doris sink table (refund_three).
        createSinkTable(tableEnv);

        // 5. Write the aggregate into the sink.
        saveToSink(tableEnv, resultTable);
    }

    /**
     * Registers {@code resultTable} as a temporary view and inserts it
     * into the Doris sink table {@code refund_three}.
     */
    private static void saveToSink(TableEnvironment tableEnv, Table resultTable) {
        tableEnv.createTemporaryView("ads_refund", resultTable);
        tableEnv.executeSql(
                "insert into refund_three " +
                        " select * from ads_refund"
        ).print();
    }

    /**
     * Creates the Doris sink table {@code refund_three}.
     *
     * <p>NOTE: Flink SQL only supports {@code --} line comments and
     * {@code /* ... *}{@code /} block comments. The previous version embedded
     * {@code //} comments inside the DDL string, which made
     * {@code executeSql} fail with a parse exception before the job could
     * start. The option explanations now live here instead:
     * <ul>
     *   <li>{@code sink.buffer-flush.max-rows}: flush after 1000 buffered rows
     *       (replacement for the legacy {@code sink.batch.size})</li>
     *   <li>{@code sink.buffer-flush.interval}: flush every 10s so data does
     *       not pile up (replacement for {@code sink.batch.interval})</li>
     *   <li>{@code sink.max-retries}: retry failed writes 3 times</li>
     *   <li>{@code sink.enable-delete}: allow delete/update rows so upsert
     *       semantics work end to end</li>
     *   <li>{@code sink.properties.format}: JSON, matching the upstream
     *       Kafka payload format</li>
     *   <li>{@code sink.enable.batch-mode}: batch writes for throughput</li>
     * </ul>
     *
     * <p>The primary key is {@code (window_start, refund_scene)} because the
     * query groups by window AND scene; a window-only key would make rows for
     * different scenes in the same window overwrite each other on upsert.
     * The physical Doris table (refund_insights.refund_three) must use the
     * same unique key — TODO confirm against the Doris-side DDL.
     */
    private static void createSinkTable(TableEnvironment tableEnv) {
        tableEnv.executeSql(
                "CREATE TABLE refund_three (\n" +
                        "    window_start TIMESTAMP(3),\n" +
                        "    window_end TIMESTAMP(3),\n" +
                        "    refund_scene STRING,\n" +
                        "    refund_count BIGINT,\n" +
                        "    refund_detail_count BIGINT,\n" +
                        "    refund_detail_ratio DECIMAL(26, 2),\n" +
                        "    refund_amount DOUBLE,\n" +
                        "    refund_amount_ratio DOUBLE,\n" +
                        "    PRIMARY KEY (`window_start`, `refund_scene`) NOT ENFORCED\n" +
                        ") WITH (\n" +
                        "    'connector' = 'doris',\n" +
                        "    'fenodes' = 'node102:8030',\n" +
                        "    'table.identifier' = 'refund_insights.refund_three',\n" +
                        "    'username' = 'root',\n" +
                        "    'password' = '123456',\n" +
                        "    'sink.buffer-flush.max-rows' = '1000',\n" +
                        "    'sink.buffer-flush.interval' = '10000',\n" +
                        "    'sink.max-retries' = '3',\n" +
                        "    'sink.enable-delete' = 'true',\n" +
                        "    'sink.properties.format' = 'json',\n" +
                        "    'sink.enable.batch-mode' = 'true'\n" +
                        ")"
        );
    }

    /**
     * Builds the metric query.
     *
     * <p>Two CTEs over one-day tumbling windows on {@code pay_time}:
     * <ul>
     *   <li>{@code daily_total_orders} — distinct order count per day over
     *       '下单' (place-order) events; denominator of the order refund rate.</li>
     *   <li>{@code refund_metrics} — per (day, refund_scene): distinct
     *       refunding users, distinct refund detail rows, distinct refunded
     *       orders, refunded amount, and the original amount of the refunded
     *       rows (denominator of the amount refund rate).</li>
     * </ul>
     * The final SELECT left-joins the two on window start, guards against
     * division by zero, clamps both ratios into [0, 100] and rounds to two
     * decimals.
     *
     * @return the result table with the sink's column layout
     */
    private static Table handle(TableEnvironment tableEnv) {
        Table table = tableEnv.sqlQuery(
                "WITH \n" +
                        "-- Per-day global order count (all '下单' events)\n" +
                        "daily_total_orders AS (\n" +
                        "    SELECT\n" +
                        "        TUMBLE_START(pay_time, INTERVAL '1' DAY) AS window_start,\n" +
                        "        COUNT(DISTINCT order_id) AS total_order_count\n" +
                        "    FROM dwd_retreat_behavior\n" +
                        "    WHERE behavior_type = '下单'\n" +
                        "    GROUP BY TUMBLE(pay_time, INTERVAL '1' DAY)\n" +
                        "),\n" +
                        "-- Per-day, per-scene refund metrics\n" +
                        "refund_metrics AS (\n" +
                        "    SELECT\n" +
                        "        TUMBLE_START(pay_time, INTERVAL '1' DAY) AS window_start,\n" +
                        "        refund_scene,\n" +
                        "        COUNT(DISTINCT user_id) AS refund_count,\n" +
                        "        COUNT(DISTINCT refund_detail_id) AS refund_detail_count,\n" +
                        "        COUNT(DISTINCT order_id) AS refund_order_count,\n" +
                        "        SUM(refund_amount) AS refund_amount,\n" +
                        "        -- Original amount of refunded rows (the WHERE clause already\n" +
                        "        -- restricts to refund rows, so no CASE guard is needed)\n" +
                        "        SUM(order_amount) AS refund_order_original_amount\n" +
                        "    FROM dwd_retreat_behavior\n" +
                        "    WHERE refund_id IS NOT NULL\n" +
                        "    GROUP BY TUMBLE(pay_time, INTERVAL '1' DAY), refund_scene\n" +
                        ")\n" +
                        "SELECT\n" +
                        "    rm.window_start,\n" +
                        "    rm.window_start + INTERVAL '1' DAY AS window_end,\n" +
                        "    rm.refund_scene,\n" +
                        "    rm.refund_count,\n" +
                        "    rm.refund_detail_count,\n" +
                        "    -- Order refund rate (%), guarded against a 0 denominator\n" +
                        "    CASE \n" +
                        "        WHEN COALESCE(dto.total_order_count, 0) = 0 THEN 0.0\n" +
                        "        ELSE ROUND(\n" +
                        "            GREATEST(0, LEAST(100, (rm.refund_order_count * 100.0 / dto.total_order_count))), \n" +
                        "            2\n" +
                        "        )\n" +
                        "    END AS refund_detail_ratio,\n" +
                        "    rm.refund_amount,\n" +
                        "    -- Amount refund rate (%): refunded amount over the original\n" +
                        "    -- amount of the refunded rows\n" +
                        "    CASE \n" +
                        "        WHEN COALESCE(rm.refund_order_original_amount, 0) = 0 THEN 0.0\n" +
                        "        ELSE ROUND(\n" +
                        "            GREATEST(0, LEAST(100, (rm.refund_amount * 100.0 / rm.refund_order_original_amount))), \n" +
                        "            2\n" +
                        "        )\n" +
                        "    END AS refund_amount_ratio\n" +
                        "FROM refund_metrics rm\n" +
                        "LEFT JOIN daily_total_orders dto\n" +
                        "    ON rm.window_start = dto.window_start\n" +
                        "WHERE rm.refund_scene IS NOT NULL\n"
        );
        return table;
    }

    /**
     * Creates the Kafka source table {@code dwd_retreat_behavior}.
     *
     * <p>Event time is {@code pay_time} with a 5-second watermark delay;
     * {@code proctime} is kept as a processing-time attribute. Reads JSON
     * from the earliest offset of topic {@code dwd_retreat_behavior}.
     */
    private static void createSourceTable(TableEnvironment tableEnv) {
        tableEnv.executeSql(
                "create table dwd_retreat_behavior (\n" +
                        "    order_id STRING,\n" +
                        "    product_id STRING,\n" +
                        "    pay_time TIMESTAMP(3),\n" +
                        "    order_amount DOUBLE,\n" +
                        "    \n" +
                        "    refund_detail_id STRING,\n" +
                        "    refund_id STRING,\n" +
                        "    apply_time TIMESTAMP(3),\n" +
                        "    refund_scene STRING,\n" +
                        "    refund_amount DOUBLE,\n" +
                        "    \n" +
                        "    behavior_id STRING,\n" +
                        "    user_id STRING,\n" +
                        "    behavior_type STRING,\n" +
                        "    proctime AS PROCTIME(),\n" +
                        "    WATERMARK FOR pay_time AS pay_time - INTERVAL '5' SECOND\n" +
                        ") WITH (\n" +
                        "    'connector' = 'kafka',\n" +
                        "    'topic' = 'dwd_retreat_behavior',\n" +
                        "    'properties.bootstrap.servers' = 'node101:9092',\n" +
                        "    'properties.group.id' = 'dwd_retreat_behavior',\n" +
                        "    'scan.startup.mode' = 'earliest-offset',\n" +
                        "    'format' = 'json'\n" +
                        ")"
        );
    }

    /**
     * Creates and configures a streaming {@link TableEnvironment}.
     *
     * <p>Sets the local time zone to Asia/Shanghai, default parallelism 1,
     * and a state TTL of 25 hours (90000 s) so state survives the whole
     * 24-hour window join interval with an hour of slack.
     *
     * @return the configured table environment
     */
    public static TableEnvironment getTableEnv() {
        // 1. Environment settings: streaming mode.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .build();
        TableEnvironment tabEnv = TableEnvironment.create(settings);
        // 2. Runtime configuration.
        Configuration configuration = tabEnv.getConfig().getConfiguration();
        configuration.setString("table.local-time-zone", "Asia/Shanghai");
        configuration.setString("table.exec.resource.default-parallelism", "1");
        // State TTL 25h (3600 * 25 = 90000 s) covers the 24h join interval.
        configuration.setString("table.exec.state.ttl", "90000 s");
        // Enable for production:
        // configuration.setString("execution.checkpointing.interval", "5 s");
        // 3. Return the configured environment.
        return tabEnv;
    }
}
