package com.example.example;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class HourTrafficTrendAnalysis {

    /**
     * Entry point for the hourly traffic trend analysis job.
     *
     * <p>Creates a Hive-enabled Spark session, aggregates user behavior events from
     * {@code dwd.dwd_behavior_info} by hour (distinct users plus counts of pv/fav/cart/buy
     * events), and overwrites the result into the MySQL table
     * {@code hour_traffic_trend_analysis}.
     *
     * @param args command-line arguments (currently unused)
     */
    public static void main(String[] args) {
        // Build a Spark session with Hive support, running locally on all available cores.
        SparkSession spark = SparkSession.builder().appName("HourTrafficTrendAnalysis")
                .master("local[*]")
                .enableHiveSupport()
                .getOrCreate();

        // Ensure the session is always closed, even if the query or the JDBC write fails;
        // otherwise the SparkContext (threads, UI port, temp dirs) would leak on error.
        try {
            // Group behavior events by hour and compute per-hour metrics:
            // distinct users, page views, favorites, add-to-carts, and purchases.
            Dataset<Row> ds = spark.sql("SELECT " +
                    "  hour, " +
                    "  count(distinct user_id) as user_cnt, " +
                    "  sum(case when type = 'pv' then 1 else 0 end) as pv_cnt, " +
                    "  sum(case when type = 'fav' then 1 else 0 end) as fav_cnt, " +
                    "  sum(case when type = 'cart' then 1 else 0 end) as cart_cnt, " +
                    "  sum(case when type = 'buy' then 1 else 0 end) as buy_cnt " +
                    "FROM  " +
                    "  dwd.dwd_behavior_info " +
                    "GROUP BY hour");

            // Write the aggregated result to MySQL, replacing any existing data.
            // NOTE(review): credentials are hardcoded in source — move to a config file,
            // environment variables, or spark-submit --conf before production use.
            ds.write()
                    .format("jdbc")
                    .option("url", "jdbc:mysql://hadoop:3306/ana_behavior_db")
                    .option("dbtable", "hour_traffic_trend_analysis")
                    .option("user", "root")
                    .option("password", "123456")
                    .option("driver", "com.mysql.cj.jdbc.Driver")
                    .mode("overwrite")
                    .save();
        } finally {
            // Release the Spark session and its underlying context.
            spark.close();
        }
    }
}
