package org.file_service

import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

object Analysis {

  /**
   * Runs the housing-data analysis pipeline: loads the raw CSV export from
   * HDFS, registers it as a temp view, and appends nine aggregate result
   * sets (price averages, counts by toward/furnish/floor/quarter/age, price
   * spreads) to MySQL tables `one` .. `ten`.
   *
   * @param p history id stamped into every output row (`historyId` column)
   * @param k user id stamped into every output row (`userId` column)
   */
  def analysis(p: Int, k: Int): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[4]")
      .appName("app")
      .getOrCreate()
    // Ensure the session is stopped even if a query or JDBC write fails.
    try {
      println("执行操作")

      // The CSV has no header row, so Spark names columns _c0.._c13;
      // rename them positionally. withColumnRenamed is a no-op for any
      // column that is absent, matching the original chain's behavior.
      val columnNames = Seq(
        "id", "date", "community", "type", "area", "real_price",
        "unit_price", "list_price", "cycle", "toward", "furnish",
        "floor", "age", "zone"
      )
      val raw = spark
        .read
        .format("csv")
        .option("sep", ",")
        .load(s"hdfs://10.7.122.2:8020/zz/analysis/")
      val csvDf = columnNames.zipWithIndex.foldLeft(raw) {
        case (df, (name, i)) => df.withColumnRenamed(s"_c$i", name)
      }

      // 3. Register the DataFrame as a SQL view. createOrReplaceTempView
      // (not createTempView) so a second invocation in the same session
      // does not throw TempTableAlreadyExistsException.
      csvDf.createOrReplaceTempView("csv_data")

      val url = "jdbc:mysql://10.7.123.197:3306/zz?useUnicode=false&characterEncoding=utf-8"
      val prop = new Properties()
      // NOTE(review): credentials are hard-coded; move to configuration or
      // environment variables before deploying.
      prop.put("user", "root")
      prop.put("password", "123456")

      // Run one query and append its result to the given MySQL table.
      def writeQuery(sql: String, table: String): Unit =
        spark.sql(sql).write.mode(SaveMode.Append).jdbc(url, table, prop)

      // Avg unit price and sale count per zone/type.
      writeQuery(s"select type, avg(unit_price) avg_price, count(*) count ,zone, ${p} historyId ,${k} userId from csv_data group by zone,type order by type", "one")

      // Avg unit price per zone.
      writeQuery(s"select avg(unit_price) avg_price, zone, ${p} historyId ,${k} userId from csv_data group by zone", "two")

      // Sale count per zone/toward (orientation).
      writeQuery(s"select toward, zone, ${p} historyId ,${k} userId, count(*) count from csv_data group by zone,toward order by toward", "three")

      // Sale count per zone/furnish (decoration level).
      writeQuery(s"select furnish, zone, ${p} historyId ,${k} userId, count(*) count from csv_data group by zone,furnish order by furnish", "four")

      // Avg unit price and count per zone/floor.
      writeQuery(s"select floor, zone, ${p} historyId ,${k} userId, avg(unit_price) avg_price,count(*) count from csv_data group by zone,floor order by floor", "five_six")

      // Sale count per zone/quarter (date stored as 'yyyy.MM.dd').
      writeQuery(s"select quarter(replace(date, '.', '-')) quarter, zone, ${p} historyId ,${k} userId, count(*) count from csv_data group by zone,quarter order by zone", "seven")

      // Sale count per zone/build-year, restricted to plausible years.
      writeQuery(s"select age house_time,zone, ${p} historyId ,${k} userId, count(*) count from csv_data where age between 1990 and 2022 group by zone,house_time order by age ", "eight")

      // Total listing-vs-sale price spread per zone.
      writeQuery(s"select sum(list_price-real_price) spread_price,zone, ${p} historyId ,${k} userId, count(*) count from csv_data where age between 1990 and 2022 group by zone order by zone ", "nine")

      // Avg unit price per zone/month.
      writeQuery(s"select avg(unit_price) avg_price ,month(replace(date, '.', '-')) month, zone, ${p} historyId ,${k} userId from csv_data group by month,zone order by month", "ten")
    } finally {
      // stop() is the canonical shutdown API (close() merely delegates to it).
      spark.stop()
    }
  }
}
