package tarns_result.old_version



import common.Tools.ifDirExistsDelete
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SparkSession}

import scala.collection.mutable;
object YEARBOOKTABLEDATA2008_FORMAL_REPORTGROUPER_GUOJI_2019_ZL {

  /**
   * Spark batch job: loads yearbook JSON records, dedupes them by
   * (Indicator, ReporterCode, Year), parses a `<REC>`-delimited address file
   * into `Address(domain_code, belongs_continent)` lookup rows, left-joins
   * the two on reporter_code = domain_code, and writes the result as JSON.
   *
   * @param args unused — input/output paths are hard-coded below.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("YEARBOOKTABLEDATA2008_FORMAL_REPORTGROUPER_GUOJI_2019_ZL")
      .setMaster("local[*]")
    val spark = SparkSession.builder().config(conf).getOrCreate()
    val sc = spark.sparkContext

    // NOTE(review): hard-coded local paths — consider taking these from `args`.
    val inputpath_json = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/trans"
    val inputpath_file = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/address3.txt"
    val outpath_sql = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/YEARBOOKTABLEDATA2008_FORMAL_REPORTGROUPER_GUOJI_2019_ZL/"

    try {
      // 1. Load the JSON input as a DataFrame and register it as view "table1".
      val dataframe = spark.read.json(inputpath_json)
      dataframe.createTempView("table")
      spark.sql("select * from table").createTempView("table1")

      // 2. Parse address3.txt. Records are separated by "<REC>"; each field is
      //    encoded as "<key>=value". Keep only records that contain BOTH
      //    "域名代码" (domain code) and "所属大洲" (continent), build Address
      //    rows from them, dedupe, and register the result as view "table2".
      val rdd = sc.textFile(inputpath_file)
      val records: RDD[String] = rdd.flatMap(_.split("<REC>")).filter(_ != "")
      val fieldMaps: RDD[Map[String, String]] = records.map { record =>
        val pairs: Array[Array[String]] = record
          .split("<")
          .filter(_ != "")       // drop empty fragments produced by the split
          .map(_.split(">="))    // each element becomes Array(key, value)
          .filter(_.length == 2) // skip fields with no value (avoids index OOB)
        val wanted = new mutable.HashMap[String, String]()
        pairs.foreach { kv =>
          // Keep only the two fields of interest; a record may have either, both, or neither.
          if (kv(0) == "域名代码" || kv(0) == "所属大洲")
            wanted += (kv(0) -> kv(1))
        }
        wanted.toMap
      }
      // Records that contain both required fields.
      // (map.size instead of building a throwaway keys array to count them.)
      val has_two_col: RDD[Map[String, String]] = fieldMaps.filter(_.size == 2)
      val address_rdd: RDD[Address] = has_two_col.map { kv =>
        Address(kv.getOrElse("域名代码", ""), kv.getOrElse("所属大洲", ""))
      }
      val distinct_rdd = address_rdd.distinct() // drop duplicate address rows
      import spark.implicits._
      val ds: Dataset[Address] = distinct_rdd.toDS()
      ds.createTempView("table2")

      // 3. Dedupe by (Indicator, ReporterCode, Year): row_number() over each
      //    group keeps exactly one row, and report_grouper is derived as the
      //    "€"-joined (year, reporter_code, formal_indicator) key.
      val spark_sql="""
                      |select
                      |year,
                      |formal_indicator,
                      |reporter_code,
                      |address_level,
                      |concat_ws("€",year,reporter_code,formal_indicator) report_grouper,
                      |show_value,
                      |show_unit,
                      |file_name_x,
                      |year_chinese_name,
                      |year_year_x,
                      |item_head_name_x,
                      |num_x,
                      |inbound_flag,
                      |internationnal_place_num
                      |from
                      |(
                      |select
                      |concat(Year,"年") year,
                      |Indicator formal_indicator,
                      |ReporterCode reporter_code,
                      |cast(1 as int) address_level,
                      |ObservationValue show_value,
                      |Unit show_unit,
                      |SourceName file_name_x,
                      |"WordBank" year_chinese_name,
                      |Year year_year_x,
                      |Reporter item_head_name_x,
                      |"N2020WBDATA" num_x,
                      |cast(1 as int) inbound_flag,
                      |ReporterCode internationnal_place_num,
                      |row_number() over(partition by Indicator,ReporterCode,Year order by Indicator,ReporterCode,Year) num
                      |from table1
                      |)t
                      |where num=1
                      |""".stripMargin

      spark.sql(spark_sql).createTempView("table3")

      // 4. Left-join the lookup table to attach belongs_continent.
      val join_sql =
        """
          |select
          |year,
          |formal_indicator,
          |reporter_code,
          |address_level,
          |report_grouper,
          |show_value,
          |show_unit,
          |file_name_x,
          |year_chinese_name,
          |year_year_x,
          |item_head_name_x,
          |num_x,
          |inbound_flag,
          |belongs_continent,
          |internationnal_place_num
          |from
          |table3
          |left join
          |table2
          |on table3.reporter_code=table2.domain_code
          |""".stripMargin
      val result = spark.sql(join_sql)

      // 5. Write the joined result as a single JSON output file.
      result.repartition(1).write.json(outpath_sql)
    } finally {
      // Fix: the session was never stopped — release Spark resources even on failure.
      spark.stop()
    }
  }
}

