package org.file_service

import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

object Test02 {

  /** Batch job: loads the housing-data CSV from HDFS, registers it as a SQL
    * view, runs a fixed set of aggregate queries and appends each result set
    * to its own MySQL table over JDBC.
    *
    * @param args unused
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[2]")
      .appName("app")
      .getOrCreate()

    // Ensure the SparkSession is always released, even if a query or a JDBC
    // write fails part-way through the job.
    try {
      val df = spark
        .read
        .format("csv")
        .option("header", "true")      // first row contains the column names
        .option("multiLine", "true")
        .option("inferSchema", "true") // infer column types while reading
        .load("hdfs://hadoop102:8020/comprehension/result.csv")

      // Constant identifiers stamped onto every output row.
      // (Previously mutable `var k` / `var p` that were never reassigned.)
      val userId = 1
      val historyId = 1

      // Register the DataFrame as a SQL view; "OrReplace" makes re-runs in
      // the same session idempotent instead of throwing.
      df.createOrReplaceTempView("csv_data")

      val url = "jdbc:mysql://localhost:3306/comprehension?useUnicode=false&characterEncoding=utf-8"
      val prop = new Properties()
      // NOTE(review): credentials are hard-coded; move them to configuration
      // or environment variables before production use.
      prop.setProperty("user", "root")
      prop.setProperty("password", "password")

      // Appends the result of `query` to the MySQL table `table`.
      def appendToTable(table: String, query: String): Unit =
        spark.sql(query).write.mode(SaveMode.Append).jdbc(url, table, prop)

      appendToTable("one",
        s"select type, avg(unit_price) avg_price, count(*) count ,zone, ${historyId} historyId ,${userId} userId from csv_data group by zone,type order by type")

      appendToTable("two",
        s"select avg(unit_price) avg_price, zone, ${historyId} historyId ,${userId} userId from csv_data group by zone")

      appendToTable("three",
        s"select toward, zone, ${historyId} historyId ,${userId} userId, count(*) count from csv_data group by zone,toward order by toward")

      appendToTable("four",
        s"select furnish, zone, ${historyId} historyId ,${userId} userId, count(*) count from csv_data group by zone,furnish order by furnish")

      appendToTable("five_six",
        s"select floor, zone, ${historyId} historyId ,${userId} userId, avg(unit_price) avg_price,count(*) count from csv_data group by zone,floor order by floor")

      appendToTable("seven",
        s"select quarter(replace(date, '.', '-')) quarter, zone, ${historyId} historyId ,${userId} userId, count(*) count from csv_data group by zone,quarter order by zone")

      appendToTable("eight",
        s"select age house_time,zone, ${historyId} historyId ,${userId} userId, count(*) count from csv_data where age between 1990 and 2022 group by zone,house_time order by age ")

      appendToTable("nine",
        s"select sum(list_price-real_price) spread_price,zone, ${historyId} historyId ,${userId} userId, count(*) count from csv_data where age between 1990 and 2022 group by zone order by zone ")

      appendToTable("ten",
        s"select avg(unit_price) avg_price ,month(replace(date, '.', '-')) month, zone, ${historyId} historyId ,${userId} userId from csv_data group by month,zone order by month")
    } finally {
      spark.stop()
    }
  }
}


