package com.wfg.bigdata.spark.sparksql;

import com.wfg.bigdata.spark.sparksql.udf.CityRemarkUDAF;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

import static org.apache.spark.sql.functions.udaf;


public class SparkSQL07_Source_Hive_Case_2 {
    public static void main(String[] args) {

        // Set the Hadoop access user before touching Hive-backed tables.
        System.setProperty("HADOOP_USER_NAME","root");

        // Build the SparkSQL environment object via the builder pattern,
        // with Hive support enabled (reads metastore-backed tables).
        final SparkSession spark = SparkSession
                .builder()
                .enableHiveSupport()
                .master("local[*]")
                .appName("SparkSQL")
                .getOrCreate();

        spark.sql("use atguigu;");

        // t1: click actions joined with the product and city dimension tables.
        final String joinSql = "select\n" +
                "               a.*,\n" +
                "               p.product_name,\n" +
                "               c.area,\n" +
                "               c.city_name\n" +
                "            from user_visit_action a\n" +
                "            join product_info p on a.click_product_id = p.product_id\n" +
                "            join city_info c on a.city_id = c.city_id\n" +
                "            where a.click_product_id > -1";
        spark.sql(joinSql).createOrReplaceTempView("t1");

        // SparkSQL wraps a typed Aggregator so it can be called like a UDF.
        // functions.udaf takes two arguments:
        //   1) the custom aggregator instance (CityRemarkUDAF)
        //   2) the Encoder for the aggregator's input column type (String here)
        spark.udf().register("cityRemark", udaf(
                new CityRemarkUDAF(), Encoders.STRING()
        ));

        // t2: click count plus the aggregated city remark per (area, product).
        final String aggSql = "  select\n" +
                "     area,\n" +
                "     product_name,\n" +
                "     count(*) as clickCnt,\n" +
                "     cityRemark(city_name) as city_remark\n" +
                "  from t1 group by area, product_name";
        spark.sql(aggSql).createOrReplaceTempView("t2");

        // t3: rank products within each area by click count, descending.
        final String rankSql = "select\n" +
                "      *,\n" +
                "      rank() over( partition by area order by clickCnt desc ) as rank\n" +
                "  from t2";
        spark.sql(rankSql).createOrReplaceTempView("t3");

        // Keep only the top 3 per area. show(false) disables truncation;
        // the default (true) abbreviates long cell values with "...".
        spark.sql("select * from t3 where rank <= 3").show(false);

        // Release resources.
        spark.close();
    }
}
