package com.qdu.data1

import org.apache.spark.sql.SaveMode

/**
 * Batch job: bucket Douyin users by their total like count and persist
 * the one-row summary (value_0 / value_1_to_5 / value_greater_than_5)
 * into the MySQL table `task1`.
 */
object UserLikes_task1 {
  def main(args: Array[String]): Unit = {
       import org.apache.spark.sql.SparkSession

       // Hive-enabled session bound to the cluster master and its metastore.
       val spark = SparkSession.builder()
             .appName("UserLikes_task1")
             .master("spark://niit-master:7077")
             .config("hive.metastore.uris", "thrift://niit-master:9083")
             .enableHiveSupport()
             .getOrCreate()

    try {
      // inferSchema makes `like` numeric instead of a string, so SUM does not
      // rely on an implicit string-to-number cast.
      val csvDF = spark.read
        .format("csv")
        .option("header", true)
        .option("inferSchema", true)
        .load("hdfs://niit-master/spark/douyin_dataset.csv")

      // createOrReplaceTempView is idempotent: a rerun in the same session
      // will not fail with "temp view DY already exists".
      csvDF.createOrReplaceTempView("DY")

      // Count users per like-total bucket. Fixes vs. the original query:
      //  * the derived table is aliased (`t`) — required by Hive's parser;
      //  * `like` is backquoted — LIKE is a SQL keyword;
      //  * the pointless ORDER BY inside the subquery is removed (it cost an
      //    extra shuffle and had no effect on the outer aggregate).
      val query =
        """
          |SELECT
          |    SUM(CASE WHEN num = 0 THEN 1 ELSE 0 END) AS value_0,
          |    SUM(CASE WHEN num BETWEEN 1 AND 5 THEN 1 ELSE 0 END) AS value_1_to_5,
          |    SUM(CASE WHEN num > 5 THEN 1 ELSE 0 END) AS value_greater_than_5
          |FROM (
          |    SELECT uid, SUM(`like`) AS num
          |    FROM DY
          |    GROUP BY uid
          |) t
          |""".stripMargin

      val df = spark.sql(query)
      df.show(truncate = true)

      // Overwrite so the job is rerunnable without duplicate rows.
      // NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5.x class;
      // Connector/J 8.x renamed it to com.mysql.cj.jdbc.Driver — confirm which
      // jar is actually deployed before changing it.
      df.write.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/spark")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("user", "root")
        .option("password", "root")
        .option("dbtable", "task1")
        .mode(SaveMode.Overwrite)
        .save()
    } finally {
      // Release cluster resources even when a stage above throws.
      spark.stop()
    }
  }
}
