package com.bigdata.exam

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Meishi {

  /**
   * Batch job: reads `/data/restaurant.csv`, keeps restaurants with a taste
   * score above 7, totals review counts per category in descending order,
   * and writes both result sets to HDFS under `/data/Result/`.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("meishi")
      // Single shuffle partition keeps output in one file and is fine for
      // this small local dataset.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Implicit conversions ($"col", .toDF, etc.) from this SparkSession.
    import spark.implicits._
    // DSL functions such as col() and desc().
    import org.apache.spark.sql.functions._

    // Read restaurant.csv and drop the two trailing columns that contain
    // only null values.
    val df = spark.read.format("csv")
      .option("header", "true")
      .load("/data/restaurant.csv")
    val df1 = df.drop("_c10").drop("_c11")

    // Rename Chinese column headers to English equivalents.
    val df2 = df1.withColumnRenamed("口味", "taste")

    // Keep only rows whose taste score is greater than 7.
    val df3 = df2.filter("taste > 7")

    // Per-category total of review counts, sorted descending.
    val df4 = df3.withColumnRenamed("类别", "type")
    val df5 = df4.withColumnRenamed("点评数", "comments")
    // CSV columns load as strings; cast to Double so sum() aggregates
    // numerically instead of failing or concatenating.
    val df6 = df5.withColumn("comments", col("comments")
      .cast(org.apache.spark.sql.types.DoubleType))
    val df7 = df6.groupBy("type").sum("comments")
      .withColumnRenamed("sum(comments)", "total_comments")
      .orderBy(desc("total_comments"))
    // NOTE: the previous extra orderBy on "sum(comments)" referenced the
    // pre-rename column name and would throw AnalysisException; df7 is
    // already sorted, so it was removed.

    // Save the filtered rows to HDFS.
    df3.rdd.saveAsTextFile("/data/Result/taste")

    // coalesce(1) (not repartition(1)): it avoids a shuffle, which would
    // destroy the descending sort order before writing.
    df7.rdd.coalesce(1)
      .saveAsTextFile("/data/Result/sort_comments")

    // Release Spark resources cleanly.
    spark.stop()
  }
}
