import java.io.{File, PrintWriter}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

object xuetangAnalyze {

  /**
   * Spark batch job: reads the diabetes data set `src\input\tnb.csv`,
   * extracts the patient id (column 0) and blood-sugar value (column 41)
   * from each well-formed row, and writes the pairs to
   * `src\output\xuetang.json` as `{"data":[{"id":...,"xuetang":...},...]}`.
   *
   * @param args optional command-line arguments; unused in local mode
   *             (see the cluster-mode comment below for args(0)/args(1) usage)
   */
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration — local mode for development/debugging.
    val sparkConf = new SparkConf().setAppName("bingliAnalyze").setMaster("local")
    // 2. Spark context.
    val sc: SparkContext = new SparkContext(sparkConf)
    // 3. Input (source CSV) and output (JSON result) files.
    //    NOTE(review): Windows-specific path separators; consider
    //    java.nio.file.Paths for portability.
    val inputFile = "src\\input\\tnb.csv"
    val outputFile = "src\\output\\xuetang.json"
    // For cluster runs, take the paths from the command line instead:
    //   val inputFile = args(0)
    //   val outputFile = args(1)

    try {
      // 4. Load the raw CSV lines.
      val userinfodata: RDD[String] = sc.textFile(inputFile)

      // 4.1 Keep only well-formed rows (at least 42 comma-separated columns)
      //     and drop the header row, which starts with "id,性别".
      //     (`val` instead of `var`: the reference is never reassigned;
      //      `!startsWith` instead of `startsWith == false`.)
      val cleanUserinfoData: RDD[String] = userinfodata.filter { x =>
        x.split(",").length >= 42 && !x.startsWith("id,性别")
      }

      // 4.2 Project each row to (id, blood-sugar) — columns 0 and 41.
      val idXuetangData = cleanUserinfoData.map { x =>
        val line = x.split(",")
        (line(0), line(41))
      }

      // 5./6. Collect to the driver and build the JSON document
      //       {"data": [{"id": ..., "xuetang": ...}, ...]} via json4s DSL.
      //       (Partial-function literal replaces the redundant
      //        `data => data match { ... }` wrapper.)
      val jsonResult = "data" -> idXuetangData.collect().toList.map {
        case (id, xuetang) =>
          ("id", id) ~
            ("xuetang", xuetang)
      }

      // Write the rendered JSON; try/finally guarantees the writer is
      // closed even if rendering or writing throws (original leaked it).
      val outputStream = new PrintWriter(outputFile)
      try {
        outputStream.write(compact(render(jsonResult)))
        outputStream.flush()
      } finally {
        outputStream.close()
      }
    } finally {
      // Always release the Spark context (original never stopped it).
      sc.stop()
    }
  }
}
