package net.wlm.jtp.job;

import net.wlm.utils.AnalyzerUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.annotation.FunctionHint;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;

import java.util.List;

/**
 * Real-time statistics of search keywords. Search phrases are segmented with
 * the IKAnalyzer tokenizer, and the data is processed in Flink SQL style.
 * @author xuanyu
 * @date 2025/5/20
 */

public class JtpTrafficSearchKeywordMinuteWindowDwsJob {
    public static void main(String[] args) {
        // 1. Table execution environment
        TableEnvironment tabEnv = getTableEnv();

        // 2. Input table - mapped onto the Kafka message queue
        createInputTable(tabEnv);

        // 3. Data processing - select
        Table reportTable = handle(tabEnv);

        // 4. Output table - mapped onto ClickHouse
        createOutputTable(tabEnv);

        // 5. Persist the result data
        saveToClickHouse(tabEnv, reportTable);
    }

    /**
     * Registers the report {@link Table} as a temporary view and streams it
     * into the ClickHouse sink table via INSERT INTO ... SELECT.
     *
     * @param tabEnv      table environment the sink table was registered in
     * @param reportTable windowed aggregation result produced by {@link #handle}
     */
    private static void saveToClickHouse(TableEnvironment tabEnv, Table reportTable) {
        // a. Register the Table object as a named temporary view
        tabEnv.createTemporaryView("report_table", reportTable);
        // b. Query and insert into the sink
        tabEnv.executeSql("insert into dws_traffic_search_keyword_window_report_clickhouse_sink\n" +
                "select\n" +
                "    DATE_FORMAT(window_start_time, 'yyyy-MM-dd HH:mm:ss') as window_start_time,\n" +
                "    DATE_FORMAT(window_end_time, 'yyyy-MM-dd HH:mm:ss') as window_end_time,\n" +
                "    keyword,\n" +
                "    keyword_count,\n" +
                "    ts\n" +
                "from report_table");
    }

    /**
     * Creates the ClickHouse sink table
     * {@code dws_traffic_search_keyword_window_report_clickhouse_sink}.
     *
     * @param tabEnv table environment to register the sink table in
     */
    private static void createOutputTable(TableEnvironment tabEnv) {
        tabEnv.executeSql(
                "CREATE TABLE dws_traffic_search_keyword_window_report_clickhouse_sink\n" +
                        "(\n" +
                        "    `window_start_time` STRING COMMENT '窗口开始日期时间',\n" +
                        "    `window_end_time`   STRING COMMENT '窗口结束日期时间',\n" +
                        "    `keyword`           STRING COMMENT '搜索关键词',\n" +
                        "    `keyword_count`     BIGINT COMMENT '搜索关键词被搜索次数',\n" +
                        "    `ts`                BIGINT COMMENT '数据产生时间戳'\n" +
                        ") WITH (\n" +
                        "      'connector' = 'clickhouse',\n" +
                        "      'url' = 'jdbc:clickhouse://node103:8123/jtp_log_report',\n" +
                        "      'table' = 'dws_traffic_search_keyword_window_report',\n" +
                        "      'username' = 'default',\n" +
                        "      'password' = '',\n" +
                        "      'format' = 'json'\n" +
                        "      )"
        );
        System.out.println("输出表创建成功");
    }

    /**
     * Core processing: filters search-page log events, splits each search
     * phrase into keywords with the IK-analyzer UDTF, then counts keywords
     * per 1-minute tumbling event-time window.
     *
     * @param tabEnv table environment where the Kafka source table exists
     * @return report table (window_start_time, window_end_time, keyword, keyword_count, ts)
     */
    private static Table handle(TableEnvironment tabEnv) {
        // s1 - Extract the search phrase and its event time.
        //      page['item'] holds the search phrase when item_type = 'keyword'
        //      and the previous page was the search page.
        Table searchLogTable = tabEnv.sqlQuery("select\n" +
                "    page['item'] as full_word,\n" +
                "    row_time\n" +
                "    from dwd_traffic_page_log_kafka_source\n" +
                "    where page['item_type'] = 'keyword'\n" +
                "    and page['last_page_id'] = 'search'");
        tabEnv.createTemporaryView("search_log_table", searchLogTable);
        // s2 - Use the custom UDTF to segment the search phrase into keywords
        //      (one output row per keyword, cross-joined with the source row).
        tabEnv.createTemporarySystemFunction("ik_analyzer_udtf", IKAnalyzerUDTF.class);
        Table wordLogTable = tabEnv.sqlQuery("select\n" +
                "    full_word,\n" +
                "    keyword,\n" +
                "    row_time\n" +
                "    from search_log_table,\n" +
                "lateral table(ik_analyzer_udtf(full_word)) as T(keyword)");
        tabEnv.createTemporaryView("word_log_table", wordLogTable);
        // s3 - Group by a 1-minute tumbling window and keyword, count occurrences.
        //      ts is the wall-clock emission time in epoch milliseconds.
        Table reportTable = tabEnv.sqlQuery("select\n" +
                "    TUMBLE_START(row_time, INTERVAL '1' MINUTES) as window_start_time,\n" +
                "    TUMBLE_END(row_time, INTERVAL '1' MINUTES) as window_end_time,\n" +
                "    keyword,\n" +
                "    count(keyword) as keyword_count,\n" +
                "    UNIX_TIMESTAMP() * 1000 as ts\n" +
                "      from word_log_table\n" +
                "    group by\n" +
                "    TUMBLE(row_time, INTERVAL '1' MINUTES)\n" +
                "        , keyword");
        // Return the computed result
        return reportTable;
    }

    /**
     * Creates the Kafka source table {@code dwd_traffic_page_log_kafka_source}
     * with an event-time column derived from the {@code ts} epoch-millis field
     * and a zero-delay watermark.
     *
     * @param tabEnv table environment to register the source table in
     */
    private static void createInputTable(TableEnvironment tabEnv) {
        tabEnv.executeSql(
                "CREATE TABLE dwd_traffic_page_log_kafka_source\n" +
                        "(\n" +
                        "    `common` MAP<STRING,\n" +
                        "    STRING> COMMENT '公共环境信息',\n" +
                        "    `page`   MAP<STRING,\n" +
                        "    STRING> COMMENT '页面信息',\n" +
                        "    `ts`     BIGINT,\n" +
                        "    row_time AS TO_TIMESTAMP(FROM_UNIXTIME(ts / 1000, 'yyyy-MM-dd HH:mm:ss.SSS')),\n" +
                        "    WATERMARK FOR row_time AS row_time - INTERVAL '0' MINUTE\n" +
                        ") WITH (\n" +
                        "      'connector' = 'kafka',\n" +
                        "      'topic' = 'dwd-traffic-page-log',\n" +
                        "      'properties.bootstrap.servers' = 'node101:9092,node102:9092,node103:9092',\n" +
                        "      'properties.group.id' = 'gid_dws_traffic_search_keyword',\n" +
                        "      'scan.startup.mode' = 'earliest-offset',\n" +
                        "      'format' = 'json',\n" +
                        "      'json.fail-on-missing-field' = 'false',\n" +
                        "      'json.ignore-parse-errors' = 'true'\n" +
                        "      )"
        );
    }

    /**
     * Builds the streaming {@link TableEnvironment} (Blink planner, as used by
     * Flink 1.13) and applies job-level configuration.
     *
     * @return configured table environment
     */
    private static TableEnvironment getTableEnv() {
        // a - Environment settings
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .useBlinkPlanner()
                .build();
        TableEnvironment tabEnv = TableEnvironment.create(settings);
        // b - Job configuration
        Configuration configuration = tabEnv.getConfig().getConfiguration();
        configuration.setString("table.local-time-zone","Asia/Shanghai");
        configuration.setString("table.exec.resource.default-parallelism", "1");
        // NOTE(review): 5 s state TTL is far shorter than the 1-minute window.
        // Group-window aggregates have their own state cleanup in Flink 1.13,
        // but verify this TTL cannot expire window state prematurely.
        configuration.setString("table.exec.state.ttl", "5 s");
        // c - Return the environment
        return tabEnv;
    }

    /**
     * Custom Flink SQL UDTF that segments a search phrase into individual
     * keywords with the IKAnalyzer Chinese tokenizer; emits one row per keyword.
     * https://nightlies.apache.org/flink/flink-docs-release-1.13/docs/dev/table/functions/udfs/#table-functions
     */
    @FunctionHint(output = @DataTypeHint("ROW<keyword STRING>"))
    public static class IKAnalyzerUDTF extends TableFunction<Row> {
        public void eval(String text) throws Exception {
            // Guard against null/blank input: page['item'] is a MAP lookup and
            // may be null even when item_type = 'keyword'; passing null to the
            // analyzer would fail the task and restart the whole streaming job.
            if (text == null || text.trim().isEmpty()) {
                return;
            }
            // 1. Segment the search phrase with IKAnalyzer
            List<String> list = AnalyzerUtil.ikAnalyzer(text);
            if (list == null) {
                return;
            }
            // 2. Emit each keyword as a one-column row
            for (String keyword : list) {
                collect(Row.of(keyword));
            }
        }
    }
}
