package com.sdg.spark

import java.text.SimpleDateFormat
import java.util.{Calendar, Date, Properties}

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Big-data analysis job for rental-housing (Lianjia) data.
  * Data flow: CSV ==> Spark ==> MySQL [result tables]
  */
object MyApplication {

  /**
    * Entry point: loads the Lianjia rental CSV, registers it as a temp view,
    * runs several SQL aggregations, and writes each result table to MySQL.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // Create the Spark execution environment (local mode, 2 threads).
    val conf = new SparkConf().setMaster("local[2]").setAppName("MyApplication")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    // Read the raw CSV data (no header row in the file).
    val srcDF = spark
      .read
      .format("csv")
      .option("header", "false")
      //.option("multiLine", true)
      .load(".\\BigdataAnalysis\\src\\main\\resources\\lianjia_product.csv")

    // Rename the positional CSV columns to meaningful names.
    val renameDF = srcDF.toDF("area", "plot_id", "price", "plot_name", "property"
      , "date01", "house_id", "elevator", "id", "face", "total_amount", "look", "jing_zhuang"
      , "nian_fen", "title", "guan_zhu", "qu_yu", "qu_xian", "city")

    // JDBC connection settings. NOTE(review): credentials are hard-coded;
    // consider moving them to configuration.
    val jdbcUrl = "jdbc:mysql://localhost:3306/renthouse?useUnicode=true&characterEncoding=utf8"
    val prop = new Properties()
    prop.put("user", "root")
    prop.put("password", "root")

    println("1.把原始数据存储到mysql中 rent_lianjia 中")
    renameDF.write.mode("overwrite")
      .jdbc(jdbcUrl, "renthouse.rent_lianjia", prop)
    println("写入成功")

    // Register as a temp view for SQL queries. createOrReplaceTempView is
    // idempotent (createTempView throws if the view already exists).
    renameDF.createOrReplaceTempView("t_lianjia")
    renameDF.printSchema()

    // 2. Per-district stats: number of estates (plot_id) and listings (house_id),
    //    ranked for a bar chart.
    // NOTE(review): "2 * count(house_id)" doubles the listing count — confirm
    // this factor is intentional before changing it.
    val houseNumPerArea: DataFrame = spark.
      sql("select qu_yu quyu, count(plot_id) loupan_num,2 * count( house_id) fangyuan_num from t_lianjia group by qu_yu  order by loupan_num desc")
    houseNumPerArea.show()
    houseNumPerArea.write.mode("overwrite")
      .jdbc(jdbcUrl, "renthouse.rent_house_per_area", prop)
    println("2.按照区域qu_yu分组统计并将计算结果写入到 rent_house_per_area 表中")

    // 3. Per-district summary table (how many listings per district) —
    //    rendered as a table on the frontend, no pre-processing needed here.

    // 4. Listing counts by year / month / day (for bar charts).
    //    Output columns: dt, day_count, mn_count, year_count plus
    //    month-end / year-end flags.
    val yearDF = spark.sql("select substring(date01,0,4) year,count(1)  year_count from t_lianjia group by substring(date01,0,4)")
    yearDF.printSchema()
    val monthDF = spark.sql("select substring(date01,0,7) month,count(1) mn_count from t_lianjia group by substring(date01,0,7)")
    monthDF.printSchema()
    val dayDF = spark.sql("select substring(date01,0,10) dt,count(1) day_count from t_lianjia group by substring(date01,0,10)")
    dayDF.printSchema()

    // UDF: is the given yyyy-MM-dd date the last day of its year?
    // Fixed: the original compared against Dec 31 of the *current* wall-clock
    // year, so Dec 31 of any other year was wrongly classified "N".
    // Year-end only depends on the date itself ending in "-12-31".
    spark.udf.register("yearend", (date: String) => {
      if (date != null && date.endsWith("-12-31")) "Y" else "N"
    })

    // UDF: is the given yyyy-MM-dd date the last day of its month?
    spark.udf.register("monthend", (x: String) => {
      val format = new SimpleDateFormat("yyyy-MM-dd")
      val date: Date = format.parse(x)
      val calendar: Calendar = Calendar.getInstance
      calendar.setTime(date)
      if (calendar.get(Calendar.DATE) == calendar.getActualMaximum(Calendar.DAY_OF_MONTH)) {
        "Y"
      } else {
        "N"
      }
    })

    val idEndDF: DataFrame = spark.sql("select substring(date01,0,10) dt01 ,monthend(substring(date01,0,10)) is_monthend,yearend(substring(date01,0,10)) is_yearend from t_lianjia")

    // Join day/month/year counts plus the end-of-period flags into one table,
    // keyed on the day string (dt), then drop the duplicate join keys.
    val rent_dj_count = dayDF.join(monthDF, dayDF.col("dt").substr(0, 7).equalTo(monthDF.col("month")), "left")
      .join(yearDF, dayDF.col("dt").substr(0, 4).equalTo(yearDF.col("year")), "left")
      .join(idEndDF, dayDF.col("dt").equalTo(idEndDF.col("dt01")), "left").drop("month", "year", "dt01")
    rent_dj_count.show()
    rent_dj_count.distinct().write.mode("overwrite")
      .jdbc(jdbcUrl, "renthouse.rent_dj_count01", prop)

    // Price/GMV rankings.
    // 1. By listing (house_id):
    val fyDFCount: DataFrame = spark.sql("SELECT house_id ,COUNT(1) gmv_count,SUM(total_amount) gmv_amount  FROM t_lianjia GROUP BY house_id ")
    fyDFCount.write.mode("overwrite")
      .jdbc(jdbcUrl, "renthouse.fy_sum_count", prop)
    // 3. By date (date01):
    val monthDFCount: DataFrame = spark.sql("SELECT date01 ,COUNT(1) gmv_count,SUM(total_amount) gmv_amount  FROM t_lianjia GROUP BY date01")
    monthDFCount.write.mode("overwrite")
      .jdbc(jdbcUrl, "renthouse.month_sum_count", prop)
    // 2. By district (qu_yu):
    val qyDFCount: DataFrame = spark.sql("SELECT qu_yu ,COUNT(1) gmv_count,SUM(total_amount) gmv_amount  FROM t_lianjia GROUP BY qu_yu ")
    qyDFCount.write.mode("overwrite")
      .jdbc(jdbcUrl, "renthouse.qy_sum_count", prop)

    spark.stop()
  }

}
