import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}

import java.io.PrintWriter
/**
 * One cleaned record of the diabetes (tnb) dataset.
 *
 * Field names are kept in Chinese to match the source CSV header:
 * gender, age, and a set of blood-chemistry measurements ending with
 * blood sugar (血糖).
 */
case class Cleaned(
    id: Int,
    性别: String,
    年龄: Int,
    天门冬氨酸氨基转换酶: Double,
    丙氨酸氨基转换酶: Double,
    碱性磷酸酶: Double,
    谷氨酰基转换酶: Double,
    甘油三酯: Double,
    总胆固醇: Double,
    高密度脂蛋白胆固醇: Double,
    低密度脂蛋白胆固醇: Double,
    血糖: Double
)
object cleantnb {
  /**
   * Cleans the raw diabetes (tnb) CSV: drops the header row and rows with
   * missing/short columns, projects the 12 columns of interest, duplicates
   * the data with shifted ids (so the two copies stay unique), and appends
   * the result to a cleaned CSV directory and a MySQL table.
   *
   * Fixes over the previous revision:
   *   - `cleandatadf` was declared inside the for-loop but referenced after
   *     it, which did not compile; the JDBC write now happens inside the
   *     loop, so every duplicated copy is appended to MySQL.
   *   - removed the dead `var a = 0` that was immediately shadowed by the
   *     loop variable.
   */
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration (local mode for development).
    val sparkConf = new SparkConf().setAppName("cleantnb").setMaster("local")
    // 2. SparkContext and SparkSession sharing the same configuration.
    val sc: SparkContext = new SparkContext(sparkConf)
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    // 3. Input source file and output location for the cleaned data.
    val inputFile = "src\\input\\tnb.csv"
    val outputFile = "src\\output\\cleaned\\tnb_cleaned.csv"
    // For cluster runs, take the paths from the command line instead:
    //   val inputFile = args(0)
    //   val outputFile = args(1)

    // 4. Read the raw file as plain text lines.
    val userinfodata: RDD[String] = sc.textFile(inputFile)

    // 4.1 Drop the header row ("id,性别,...") and any line with fewer than
    //     42 comma-separated columns (column index 41 is accessed below),
    //     then project the 12 fields we keep as a tuple of raw strings.
    val cleanData = userinfodata.filter { x =>
      val fields = x.split(",")
      fields.length >= 42 && !x.startsWith("id,性别")
    }.map { x =>
      val line = x.split(",")
      (line(0), line(1), line(2),
        line(4), line(5), line(6),
        line(7), line(12), line(13),
        line(14), line(15), line(41))
    }

    // 5. Emit the cleaned data twice (a = 0 and a = 1); the second copy's
    //    ids are offset by 5732 to keep them unique. Each copy is appended
    //    to the CSV output and to the MySQL table.
    for (a <- 0 to 1) {
      val cleaneddata = cleanData.filter { x =>
        // keep only rows where every selected field is non-empty,
        // so the numeric conversions below cannot fail on ""
        x._1 != "" && x._2 != "" && x._3 != "" && x._4 != "" &&
          x._5 != "" && x._6 != "" && x._7 != "" && x._8 != "" &&
          x._9 != "" && x._10 != "" && x._11 != "" && x._12 != ""
      }.map { x =>
        Cleaned(x._1.toInt + 5732 * a, x._2, x._3.toInt,
          x._4.toDouble, x._5.toDouble, x._6.toDouble, x._7.toDouble,
          x._8.toDouble, x._9.toDouble, x._10.toDouble, x._11.toDouble,
          x._12.toDouble)
      }
      val cleandatadf = cleaneddata.toDF()

      // Append this copy to the cleaned CSV output directory.
      cleandatadf.write
        .format("csv")
        .option("header", "true")
        .mode(SaveMode.Append)
        .save(outputFile)

      // Append this copy to MySQL. (Previously attempted after the loop,
      // where `cleandatadf` was out of scope and the code did not compile.)
      cleandatadf.write
        .format("jdbc")
        .option("url", "jdbc:mysql://192.168.198.124:3306/health_monitoring")
        .option("user", "root")
        .option("password", "root")
        .option("dbtable", "xuetang_table")
        .mode(SaveMode.Append)
        .save()
    }

    // Release Spark resources (also stops the underlying SparkContext).
    spark.stop()
  }
}
