import java.io.PrintWriter

import scala.util.Try

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

/**
 * Batch job that reads a diabetes-screening CSV, keeps the records whose
 * blood-glucose value exceeds 7.0, and writes the first 10 qualifying
 * records to a JSON file.
 *
 * Input : CSV with >= 42 columns; column 41 (0-based) is blood glucose.
 * Output: a JSON document of the form {"data":[{...},...]}.
 */
object ajsmeiAnalyze {
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration (local mode for development/debugging).
    val sparkConf = new SparkConf().setAppName("ajsmeiAnalyze").setMaster("local")
    // 2. Spark context.
    val sc: SparkContext = new SparkContext(sparkConf)

    // 3. Input dataset and output file for the analysis result.
    //    Forward slashes are portable across Windows and Unix JVMs
    //    (the original "src\\input\\..." form only worked on Windows).
    val inputFile = "src/input/tnb.csv"
    val outputFile = "src/output/ajsmei.json"
    // For a Hadoop-cluster run, take the paths from the command line instead:
    //   val inputFile  = args(0)
    //   val outputFile = args(1)

    // 4. Load the raw lines and clean them: drop the header row (it starts
    //    with "id,性别") and any record with fewer than 42 columns.
    val userinfodata: RDD[String] = sc.textFile(inputFile)
    val cleanUserinfoData: RDD[String] = userinfodata.filter { x =>
      x.split(",").length >= 42 && !x.startsWith("id,性别")
    }

    // 4.2 Project the eight enzyme/lipid columns plus blood glucose.
    //     Rows whose glucose cell (index 41) is not numeric are dropped
    //     via Try(...).toOption instead of crashing the whole job.
    val recordData: RDD[(String, String, String, String, String, String, String, String, Double)] =
      cleanUserinfoData.flatMap { x =>
        val f = x.split(",")
        Try(f(41).toDouble).toOption.map { glucose =>
          (f(4), f(5), f(6), f(7), f(12), f(13), f(14), f(15), glucose)
        }
      }
    // Debug peek at the first few parsed records.
    recordData.take(6).foreach(println)

    // 5. Keep only records with blood glucose above 7.0 (diabetes threshold).
    val result: RDD[(String, String, String, String, String, String, String, String, Double)] =
      recordData.filter(_._9 > 7.0)

    // 6. Render the first 10 qualifying records as a json4s AST.
    //    The keys are the Chinese names of the clinical measurements
    //    (aspartate aminotransferase, alanine aminotransferase, alkaline
    //    phosphatase, gamma-GT, triglycerides, total cholesterol, HDL-C,
    //    LDL-C, blood glucose) and must stay exactly as the consumers expect.
    val jsonResult = "data" -> result.take(10).toList.map {
      case (value1, value2, value3, value4, value5, value6, value7, value8, value9) =>
        ("天门冬氨酸氨基转换酶", value1) ~
          ("丙氨酸氨基转换酶", value2) ~
          ("碱性磷酸酶", value3) ~
          ("r-谷氨酰基转换酶", value4) ~
          ("甘油三酯", value5) ~
          ("总胆固醇", value6) ~
          ("高密度脂蛋白胆固醇", value7) ~
          ("低密度脂蛋白胆固醇", value8) ~
          ("血糖", value9)
    }

    // Write the compact JSON string; close the stream even if rendering
    // or writing fails (the original leaked the PrintWriter on error).
    val outputStream = new PrintWriter(outputFile)
    try outputStream.write(compact(render(jsonResult)))
    finally outputStream.close()

    // Release Spark resources.
    sc.stop()
  }
}
