package pre

import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

object ManyLineToOneLine {
  // Collapses a pretty-printed JSON file — one object spread over many lines,
  // each object opening with a lone "{" — into one JSON object per output line.
  //
  // Fixes over the previous version:
  //  * Dropped the meaningless `.set("", "")` on SparkConf.
  //  * Replaced the LongAccumulator hack with a plain per-partition counter.
  //    Accumulator updates inside transformations are not exactly-once (task
  //    retries re-add), and reading `.value` on executors is unsupported API
  //    usage — the old code only appeared to work because both maps were
  //    pipelined in a single 1-partition stage.
  //  * Added `.sortByKey()` so output objects keep the input order
  //    (`reduceByKey` alone hash-partitions keys and reorders the output —
  //    the issue the original trailing comment observed).
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ManyLineToOneLine").setMaster("local[4]")
    val sc = new SparkContext(conf)
    val input = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/WorldDevelopmentIndicators.json"

    // Single partition so the file's line order is preserved and the running
    // object counter below is globally consistent.
    val lines = sc.textFile(input).repartition(1)

    // Tag every line with the ordinal of the JSON object it belongs to, while
    // trimming whitespace and normalizing Mongo ObjectId values.
    val numbered = lines.mapPartitions { iter =>
      var objId = 0L // incremented each time a lone "{" opens a new object
      iter.map { line =>
        val l = line.trim
        if (l == "{") objId += 1
        val normalized =
          if (l == "{") l
          else if (l.contains(":")) {
            val parts = l.split(":")
            if (parts.length == 2) {
              if (parts(0).trim == "\"_id\"")
                // "_id":ObjectId("5ecb...")  ->  "_id":{"$oid":"5ecb..."}
                parts(0).trim + ":" + parts(1).trim.replace("ObjectId(", "{\"$oid\":").replace(")", "}")
              else parts(0).trim + ":" + parts(1).trim
            }
            // More than one ':' on the line (e.g. a string value like
            // "Economic Policy & Debt: Balance of payments: ..."): leave it
            // untouched, matching the original special-case handling.
            else l
          }
          else l
        (objId, normalized)
      }
    }

    // Concatenate the lines of each object (left-to-right within the single
    // partition, so intra-object line order is kept), then restore the input
    // object order before dropping the key.
    val res = numbered.reduceByKey(_ + _).sortByKey().map { case (_, obj) => obj }
    res.saveAsTextFile("/Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/WorldDevelopmentIndicators")

    sc.stop()
  }
}

