import java.io.PrintWriter

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}
object test {
  /**
   * Sanity-check driver: loads the cleaned "tnb" records from a local MySQL
   * database over JDBC and from a previously-written CSV part file, printing
   * one sample record from each source.
   *
   * Side effects: opens a local SparkSession, reads JDBC + local file,
   * prints to stdout, then stops the session.
   */
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration: local, single-JVM execution.
    val sparkConf = new SparkConf().setAppName("test").setMaster("local")
    // 2. Create ONE SparkSession and reuse its SparkContext. Constructing a
    //    separate `new SparkContext(conf)` alongside a SparkSession is redundant
    //    and risks "only one SparkContext may be running" errors.
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc: SparkContext = spark.sparkContext

    import spark.implicits._

    // Read the cleaned table over JDBC.
    // NOTE(review): credentials are hard-coded; move them to configuration or a
    // secrets store before deploying anywhere non-local.
    val bingliDf: DataFrame = spark.read.format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/health_monitoring")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "tnb_cleaned")
      .load()

    // Project the first two columns as strings. Option(...) guards against SQL
    // NULLs, which would otherwise make `.toString` throw a NullPointerException.
    val binglirdd: RDD[(String, String)] = bingliDf.map { row =>
      (Option(row(0)).map(_.toString).getOrElse(""),
       Option(row(1)).map(_.toString).getOrElse(""))
    }.rdd
    binglirdd.take(1).foreach(println)

    // NOTE(review): Windows-style path with a pinned part-file UUID — fragile;
    // consider reading the whole `tnb_cleaned.csv` directory instead.
    val inputFile = "src\\output\\cleaned\\tnb_cleaned.csv\\part-00000-7da365d4-d48c-400d-b8b4-29c7abc55d37-c000.csv"
    val userinfodata: RDD[String] = sc.textFile(inputFile)
    userinfodata.take(1).foreach(println)

    // Release Spark resources before the JVM exits.
    spark.stop()
  }
}
