package com.wfg.bigdata.spark.sparksql;

import org.apache.spark.sql.SparkSession;


/**
 * Spark SQL + Hive demo: for every area, find the top-3 most-clicked
 * products, using a rank() window function over per-(area, product)
 * click counts joined from user_visit_action / product_info / city_info.
 */
public class SparkSQL07_Source_Hive_Case_1 {
    public static void main(String[] args) {

        // Set the Hadoop access user BEFORE touching Hive/HDFS.
        // Hadoop's UserGroupInformation falls back to this system property
        // when the HADOOP_USER_NAME environment variable is not set.
        System.setProperty("HADOOP_USER_NAME", "root");

        // Build the SparkSQL environment object (builder pattern) with Hive
        // support enabled. try-with-resources guarantees the session is
        // released even when a query throws — the original only closed it on
        // the success path. (SparkSession implements Closeable.)
        try (final SparkSession spark = SparkSession
                .builder()
                .enableHiveSupport() // enable Hive metastore / HiveQL support
                .master("local[*]")
                .appName("SparkSQL")
                .getOrCreate()) {

            // No trailing ';' — Spark's SQL parser treats the statement text
            // as a single statement and some versions reject a terminator.
            spark.sql("use atguigu");

            // t1: click events enriched with product name and city/area;
            // t2: click counts grouped by (area, product_name);
            // t3: rank within each area by click count, keep the top 3.
            spark.sql("select\n" +
                    "    *\n" +
                    "from (\n" +
                    "    select\n" +
                    "        *,\n" +
                    "        rank() over( partition by area order by clickCnt desc ) as rank\n" +
                    "    from (\n" +
                    "        select\n" +
                    "           area,\n" +
                    "           product_name,\n" +
                    "           count(*) as clickCnt\n" +
                    "        from (\n" +
                    "            select\n" +
                    "               a.*,\n" +
                    "               p.product_name,\n" +
                    "               c.area,\n" +
                    "               c.city_name\n" +
                    "            from user_visit_action a\n" +
                    "            join product_info p on a.click_product_id = p.product_id\n" +
                    "            join city_info c on a.city_id = c.city_id\n" +
                    "            where a.click_product_id > -1\n" +
                    "        ) t1 group by area, product_name\n" +
                    "    ) t2\n" +
                    ") t3 where rank <= 3").show();
        }

    }
}
