import java.io.PrintWriter

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

/**
 * One row of the analysis result: a diabetes-type / glucose-category pair
 * with the number of matching patients.
 *
 * Glucose classification (mmol/L):
 *   - Normal:        3.9 - 6.1
 *   - Pre-diabetic:  6.1 - 7.0
 *   - Diabetic:      above 7.0
 *
 * @param tangtype   diabetes type label (e.g. Type I / Type II)
 * @param binglitype glucose category label (normal / pre-diabetic / diabetic)
 * @param number     count of patients in this category
 */
final case class BingliResult(tangtype: String, binglitype: String, number: Double)
object bingliAnalyze {

  /**
   * Counts patients per blood-glucose category for one cohort.
   *
   * Categories (mmol/L): normal [3.9, 6.1], pre-diabetic (6.1, 7.0], diabetic (7.0, +inf).
   * Each category is counted with a constant-key reduceByKey, then the three
   * single-key RDDs are unioned — same shape as the original pipeline so the
   * written output is unchanged.
   *
   * @param data (age, glucose) pairs for one cohort
   * @return (category-label, count) pairs; categories with zero rows are absent
   */
  private def countByGlucose(data: RDD[(Double, Double)]): RDD[(String, Int)] = {
    val normal = data.filter(x => x._2 >= 3.9 && x._2 <= 6.1)
      .map(_ => ("正常", 1)).reduceByKey(_ + _)
    val pre = data.filter(x => x._2 > 6.1 && x._2 <= 7.0)
      .map(_ => ("糖尿病前期患者", 1)).reduceByKey(_ + _)
    val severe = data.filter(x => x._2 > 7.0)
      .map(_ => ("糖尿病重度患者", 1)).reduceByKey(_ + _)
    normal.union(pre).union(severe)
  }

  /**
   * Entry point: reads cleaned diabetes records from MySQL, splits patients
   * into Type I (age 40-49) and Type II (age > 49) cohorts, counts each
   * cohort per glucose category, and writes both results back to MySQL
   * table `tnb_bingli` (Type I overwrites, Type II appends).
   */
  def main(args: Array[String]): Unit = {
    // 1. Spark environment (local mode; pass a master via spark-submit for cluster runs)
    val sparkConf = new SparkConf().setAppName("bingliAnalyze").setMaster("local")
    val sc: SparkContext = new SparkContext(sparkConf)
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // 2. Read the cleaned source table from MySQL
    val bingliDf: DataFrame = spark.read.format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_cleaned")
      .load()

    // 3. Flatten each Row to a CSV-like string (Row.toString renders as "[a,b,c]").
    // NOTE(review): this breaks if any column value itself contains a comma or
    // square bracket — consider selecting typed columns from the DataFrame instead.
    val binglirdd: RDD[String] = bingliDf
      .map(x => x.toString().stripPrefix("[").stripSuffix("]"))
      .rdd

    // 3.1 Drop a header row if present (JDBC sources normally have none,
    // but this keeps parity with the CSV-file variant of this job).
    val cleanUserinfoData: RDD[String] = binglirdd.filter(x => !x.startsWith("id,gender"))

    // 4. Extract the two features we need: column 6 = age, column 11 = blood glucose.
    // Assumes both columns always parse as Double — TODO confirm against tnb_cleaned schema.
    val ageData: RDD[(Double, Double)] = cleanUserinfoData.map { x =>
      val line = x.split(",")
      (line(6).toDouble, line(11).toDouble)
    }

    // 5. Split cohorts by age: Type I = 40-49 inclusive, Type II = 50 and above.
    // Ages below 40 are intentionally excluded, as in the original job.
    val yixing = ageData.filter(x => x._1 >= 40 && x._1 <= 49)
    val erxing = ageData.filter(x => x._1 > 49)

    // 6. Count each cohort per glucose category
    val yixingresult: RDD[(String, Int)] = countByGlucose(yixing)
    val erxingresult: RDD[(String, Int)] = countByGlucose(erxing)

    // 7. Label each row with its diabetes type and write both results to MySQL.
    // Type I overwrites the table, Type II appends, so the table ends up with both.
    val yixingDF = yixingresult.map(x => BingliResult("Ⅰ型糖尿病", x._1, x._2)).toDF()
    val erxingDF = erxingresult.map(x => BingliResult("Ⅱ型糖尿病", x._1, x._2)).toDF()

    yixingDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://localhost/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_bingli")
      .mode(SaveMode.Overwrite)
      .save()
    erxingDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://localhost/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_bingli")
      .mode(SaveMode.Append)
      .save()

    // 8. Release Spark resources (stops the shared SparkContext as well).
    spark.stop()
  }
}
