package com.bigdata.exam.cg

import org.apache.spark.sql.types.DoubleType
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

object Meishi {
  /**
   * Batch job: reads restaurant.csv, keeps restaurants with a taste score
   * above 7, totals the review counts per category in descending order,
   * prints the result, and writes it out as JSON.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("meishi")
      // One shuffle partition keeps this small local job's output in a single file.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("error")

    // CSV has a header row with Chinese column names; with no explicit schema
    // every column is loaded as a string.
    val df: DataFrame = spark.read
      .format("csv")
      .option("header", true)
      .option("sep", ",")
      .load("data/restaurant.csv")

    // (1) Drop the two trailing columns whose values are all empty.
    val df1: DataFrame = df.drop("_c10", "_c11")

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Rename the Chinese column names to English ("口味" = taste).
    val df2 = df1.withColumnRenamed("口味", "taste")

    // "类别" = category, "点评数" = review count. Cast the numeric columns
    // explicitly so filtering and aggregation operate on doubles rather than
    // relying on implicit string casts.
    val df3: DataFrame = df2.withColumnRenamed("类别", "type")
      .withColumnRenamed("点评数", "comments")
      .withColumn("comments", $"comments".cast(DoubleType))
      .withColumn("taste", $"taste".cast(DoubleType))

    // Keep rows with taste score > 7, then sum review counts per category,
    // largest total first.
    val df4: Dataset[Row] = df3
      .where($"taste" > 7)
      .groupBy($"type")
      .agg(sum($"comments") as "total_comments")
      .orderBy($"total_comments".desc)

    df4.printSchema()
    df4.show()

    // Fix: the original built a DataFrameWriter but never invoked save(), so
    // the configured write was a no-op and no JSON output was ever produced.
    df4
      .write
      .format("json")
      .mode(SaveMode.Overwrite)
      .save("data/meishi_json")

    // Release the session's resources before the JVM exits.
    spark.stop()
  }
}
