package com.huahua.bigdata.sparksql;

import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;

/**
 * Requirement: for every area, find the top-3 most-clicked products, together
 * with a "city remark" string (built by {@code MyCityRemarkUDAF}) describing
 * the city distribution of the clicks.
 *
 * Reads the Hive tables {@code user_visit_action}, {@code product_info} and
 * {@code city_info}; prints the result with {@code show()}.
 */
public class SparkSQL09_Source_Req_1 {
    public static void main(String[] args) {

        // Set the Hadoop access user before creating the session, so that all
        // HDFS/Hive operations are performed as this user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        final SparkSession sparkSession = SparkSession
                .builder()
                .enableHiveSupport()  // enable Hive support (metastore-backed tables)
                .master("local[*]")
                .appName("SparkSQL")
                .getOrCreate();

        // Register the custom aggregate function used in the query below.
        // Encoders.STRING() is the encoder for the UDAF's input column (city_name).
        sparkSession.udf().register("cityRemark", functions.udaf(
                new MyCityRemarkUDAF(), Encoders.STRING()
        ));

        // Top-3 clicked products per area.
        // NOTE(fix): the original string was missing the comma between `*` and
        // the rank() window expression (and the newline after `rk`), which made
        // the SQL unparsable. Both are corrected here.
        sparkSession.sql(
                "select * from (\n" +
                "  select *,\n" +
                "    rank() over(partition by area order by clickCnt desc) rk\n" +
                "  from (\n" +
                "    select\n" +
                "      area,\n" +
                "      product_name,\n" +
                "      count(*) clickCnt,\n" +
                "      cityRemark(city_name) cityremark\n" +
                "    from (\n" +
                "      select\n" +
                "        click_product_id,\n" +
                "        city_id\n" +
                "      from user_visit_action\n" +
                "      where click_product_id != -1\n" +
                "    ) a\n" +
                "    join (\n" +
                "      select\n" +
                "        product_id,\n" +
                "        product_name\n" +
                "      from product_info\n" +
                "    ) p on a.click_product_id = p.product_id\n" +
                "    join (\n" +
                "      select\n" +
                "        city_id,\n" +
                "        city_name,\n" +
                "        area\n" +
                "      from city_info\n" +
                "    ) c on a.city_id = c.city_id\n" +
                "    group by area, product_id, product_name\n" +
                "  ) t\n" +
                ") t1 where rk <= 3").show();

        sparkSession.close();
    }
}
