import java.io.PrintWriter
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/** One row of the age-distribution analysis result.
  *
  * @param binglitype disease-stage label, e.g. "糖尿病前期患者" / "糖尿病重度患者"
  * @param stage      age-bucket label, e.g. "10岁以下", "10~20", ... "90岁以上"
  * @param number     count of patients falling into that bucket
  */
final case class AgeResult(binglitype: String, stage: String, number: Int)
object AgeStageAnalyze {

  /** Maps an age (years) to its display bucket label.
    *
    * Returns None for ages below 1, which the original pipeline dropped.
    * Fixes the original off-by-one where age exactly 40 matched no bucket
    * (the 30~40 filter used `< 40` while every other bucket used `<=`).
    */
  private def ageBucket(age: Int): Option[String] =
    if (age < 1) None
    else if (age <= 10) Some("10岁以下")
    else if (age <= 20) Some("10~20")
    else if (age <= 30) Some("20~30")
    else if (age <= 40) Some("30~40")
    else if (age <= 50) Some("40~50")
    else if (age <= 60) Some("50~60")
    else if (age <= 70) Some("60~70")
    else if (age <= 80) Some("70~80")
    else if (age <= 90) Some("80~90")
    else Some("90岁以上")

  /** Counts patients per age bucket for one disease stage.
    *
    * Replaces the original 10-way filter/map/reduceByKey/union chain with a
    * single flatMap + reduceByKey pass (same buckets, one job instead of ten).
    */
  private def countByAgeBucket(data: RDD[(Int, Double)]): RDD[(String, Int)] =
    data
      .flatMap { case (age, _) => ageBucket(age) } // drop unbucketable ages (< 1)
      .map(bucket => (bucket, 1))
      .reduceByKey(_ + _)

  /** Entry point: reads cleaned diabetes records from MySQL, counts patients
    * per age bucket for pre-diabetic vs. severe stages, writes the counts back
    * to MySQL (table `tnb_age`) and to a JSON summary file.
    */
  def main(args: Array[String]): Unit = {
    // 1. Spark environment configuration (local mode for development).
    val sparkConf = new SparkConf().setAppName("AgeStageAnalyze").setMaster("local")
    // 2. SparkContext / SparkSession (the session reuses the same context).
    val sc: SparkContext = new SparkContext(sparkConf)
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    // 3. File locations. `inputFile` is only used by the (commented-out)
    //    HDFS/cluster variant of this job; current runs read from MySQL below.
    val inputFile = "src\\output\\cleaned\\tnb_cleaned.csv\\part-00000-7da365d4-d48c-400d-b8b4-29c7abc55d37-c000.csv"
    val outputFile = "src\\output\\agestage333.json"
    // Cluster debugging:
    //    val inputFile = args(0)
    //    val outputFile = args(1)

    // 4. Load the cleaned diabetes records from MySQL.
    val bingliDf: DataFrame = spark.read.format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_cleaned")
      .load()

    // 4.1 Row -> comma-joined string (Row.toString is "[a,b,c]"), then drop
    //     any header-looking line so downstream parsing sees only data rows.
    val cleanUserinfoData: RDD[String] = bingliDf
      .map(row => row.toString().stripPrefix("[").stripSuffix("]"))
      .rdd
      .filter(line => !line.startsWith("id,gender"))

    // 4.2 Extract (age, bloodGlucose): columns 2 and 11 of the cleaned record.
    //     NOTE(review): assumes every row has numeric values at those columns —
    //     a malformed row would throw NumberFormatException; confirm upstream
    //     cleaning guarantees this.
    val ageData: RDD[(Int, Double)] = cleanUserinfoData.map { line =>
      val cols = line.split(",")
      (cols(2).toInt, cols(11).toDouble)
    }
    ageData.take(2).foreach(println) // debug peek, kept from the original

    // Stage split by fasting blood glucose: pre-diabetic in (6.1, 7.0],
    // severe above 7.0 (`&&` instead of the original non-idiomatic `&`).
    val qianqi = ageData.filter { case (_, glucose) => glucose > 6.1 && glucose <= 7.0 }
    val zhongdu = ageData.filter { case (_, glucose) => glucose > 7.0 }

    // BUG FIX: the original built the severe-stage "10岁以下" bucket from the
    // pre-diabetic RDD (copy-paste); each stage now counts its own data.
    val qianqiresult: RDD[(String, Int)] = countByAgeBucket(qianqi)
    val zhongduresult: RDD[(String, Int)] = countByAgeBucket(zhongdu)

    val qianqiDF = qianqiresult.map { case (stage, count) =>
      AgeResult("糖尿病前期患者", stage, count)
    }.toDF()
    val zhongduDF = zhongduresult.map { case (stage, count) =>
      AgeResult("糖尿病重度患者", stage, count)
    }.toDF()

    // 5. Persist both stages into the same table: first write truncates
    //    (Overwrite), second appends so both stages coexist.
    qianqiDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://localhost/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_age")
      .mode(SaveMode.Overwrite)
      .save()
    zhongduDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://localhost/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_age")
      .mode(SaveMode.Append)
      .save()

    // 6. JSON summary. BUG FIX: the original wrote two independent JSON
    //    objects back-to-back into one file, which is not valid JSON; both
    //    stages are now fields of a single object (same keys as before).
    val jsonResult =
      ("糖尿病前期患者" -> qianqiresult.collect().toList.map {
        case (name, value) => ("name" -> name) ~ ("value" -> value)
      }) ~
      ("糖尿病重度患者" -> zhongduresult.collect().toList.map {
        case (name, value) => ("name" -> name) ~ ("value" -> value)
      })

    // Write the file; close the writer even if the write fails.
    val outputStream = new PrintWriter(outputFile)
    try {
      outputStream.write(compact(render(jsonResult)))
      outputStream.flush()
    } finally {
      outputStream.close()
    }

    // Release Spark resources (also stops the underlying SparkContext).
    spark.stop()
  }

}