package com.baicun.wish.spark


import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * @author baicun
 * @description
 */
object DataHandle {

  // Single local SparkSession shared by every loader (local[16] = 16 worker threads).
  val sparkSession = SparkSession.builder().appName("DataHandle").master("local[16]").getOrCreate()

//  val hdfsPath = "hdfs://master:9000/baicun/"
  // Base directory holding the source CSV files. Local Windows path for dev runs;
  // the commented HDFS variant above can be swapped in for cluster runs.
  val hdfsPath = "Y:\\Projects\\IDEA\\development\\wish\\wish-data\\src\\main\\java\\com\\baicun\\wish\\csv\\"

  def main(args: Array[String]): Unit = {
    //    remote database: 112.124.18.46  MySQL 8.0.18
    //    remote database: 39.103.136.141 MySQL 8.0.20
    //    localhost MySQL 8.0.18

//    handle("college.csv","39.103.136.141","college")
//    handle("college_basic.csv","39.103.136.141","college_basic")
//    handleProvince("province_admit.csv","39.103.136.141","province_admit")
//    handleCollegeAdmit("college_admit.csv","39.103.136.141","college_admit")
//    handleCollegeMajorAdmit("college_major_admit.csv","39.103.136.141","college_major_admit")

    handleCollegeAdmitToRecommend("college_admit.csv","localhost","wish_recommend")

  }

  /**
   * Appends a surrogate primary-key column `id`.
   * NOTE: monotonically_increasing_id yields ids that are unique and increasing
   * but NOT consecutive across partitions — fine for a surrogate key only.
   */
  private def withId(df: DataFrame): DataFrame = {
    import org.apache.spark.sql.functions._
    df.withColumn("id", monotonically_increasing_id)
  }

  /**
   * Shared MySQL sink: prints the first `showRows` rows and the schema for
   * inspection, then OVERWRITES table `dbtable` in schema `wish` on host `url`
   * (port 3306).
   *
   * Fix: the connection URL previously spelled the parameter "characterEcoding",
   * which Connector/J silently ignores; corrected to "characterEncoding" so
   * Chinese text is written as UTF-8.
   *
   * @param result   frame to persist (already carries the `id` column)
   * @param url      MySQL host name or IP
   * @param dbtable  destination table (dropped and recreated by Overwrite)
   * @param showRows rows to echo to stdout before writing (default 20, as show())
   */
  private def saveToMysql(result: DataFrame, url: String, dbtable: String, showRows: Int = 20): Unit = {
    result.show(showRows)
    result.printSchema()

    result.write.format("jdbc")
      .option("url", "jdbc:mysql://" + url + ":3306/wish?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai")
      .option("driver", "com.mysql.cj.jdbc.Driver")
      .option("user", "root")
      // TODO(review): hard-coded credentials — move to configuration/environment.
      .option("password", "root")
      .option("dbtable", dbtable)
      .mode(SaveMode.Overwrite)
      .save()
  }

  /**
   * Generic CSV → MySQL loader: keeps rows with a non-empty college_name,
   * adds an `id` column and overwrites `dbtable`.
   */
  def handle(filename: String, url: String, dbtable: String): Unit = {
    val filenamePath = hdfsPath + filename

    val dataFrame = sparkSession.read.option("header", value = true).csv(filenamePath).where("college_name != ''")

    saveToMysql(withId(dataFrame), url, dbtable)
  }

  /**
   * Province admission loader: no row filtering beyond the CSV header,
   * adds an `id` column and overwrites `dbtable`.
   */
  def handleProvince(filename: String, url: String, dbtable: String): Unit = {
    val filenamePath = hdfsPath + filename
    val dataFrame = sparkSession.read.option("header", value = true).csv(filenamePath)

    saveToMysql(withId(dataFrame), url, dbtable)
  }

  /**
   * College admission-line loader: drops rows with an empty college_name,
   * a placeholder lowest_score ("--") or a zero lowest_rank, then writes.
   */
  def handleCollegeAdmit(filename: String, url: String, dbtable: String): Unit = {
    val filenamePath = hdfsPath + filename

    val dataFrame = sparkSession.read.option("header", value = true).csv(filenamePath)
      .where("college_name != ''")
      .where("lowest_score !='--'")
      .where("lowest_rank != 0 ")

    saveToMysql(withId(dataFrame), url, dbtable)
  }

  /**
   * Per-major admission loader. The scraped CSV interleaves repeated header
   * rows with data, so rows whose values equal the Chinese column captions
   * ("省份"=province, "科类"=exam type, "年份"=year) or the stray category
   * labels ("文科"=liberal arts, "理科"=science) are filtered out, together
   * with "-" placeholders for missing score/rank.
   */
  def handleCollegeMajorAdmit(filename: String, url: String, dbtable: String): Unit = {
    val filenamePath = hdfsPath + filename

    val dataFrame = sparkSession.read.option("header", value = true).csv(filenamePath)
      .where("college_name != ''")
      .where("province != '省份'")
      .where("exam_type != '科类'")
      .where("year != '年份'")
      .where("year != '文科'")
      .where("year != '理科'")
      .where("lowest_score != '-'")
      .where("lowest_rank != '-'")

    saveToMysql(withId(dataFrame), url, dbtable)
  }

  /**
   * Builds the recommendation base table: reads college admission lines with
   * an explicit schema, drops empty/zero placeholder rows, then averages
   * lowest_score and lowest_rank per (college_name, exam_type) and overwrites
   * `dbtable`.
   */
  def handleCollegeAdmitToRecommend(filename: String, url: String, dbtable: String): Unit = {
    val filenamePath = hdfsPath + filename

    // Explicit schema so score/rank columns are numeric and averaging works.
    val schema = StructType(
      List(
        StructField("college_name", StringType),
        StructField("province", StringType),
        StructField("exam_type", StringType),
        StructField("year", IntegerType),
        StructField("highest_score", IntegerType),
        StructField("average_score", IntegerType),
        StructField("lowest_score", DoubleType),
        StructField("lowest_rank", IntegerType),
        StructField("admit_batch", StringType),
      )
    )

    val dataFrame = sparkSession.read.option("header", value = true).schema(schema).csv(filenamePath)
      .where("college_name != ''")
      .where("lowest_score != 0.0 ")
      .where("lowest_rank != 0 ")
      .groupBy("college_name","exam_type")
      .avg("lowest_score","lowest_rank")

    // "总数" = total row count (runtime output string kept as-is).
    println("总数："+dataFrame.count())

    saveToMysql(withId(dataFrame), url, dbtable, showRows = 3000)
  }
}
