package tarns_result.old_version

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

import scala.collection.mutable

/**
 * One-shot Spark job: joins World-Bank-style indicator JSON (table1) with a
 * country/region address lookup parsed from a "<REC>"-delimited text file
 * (table2), reshapes the columns via Spark SQL, and writes the result as JSON.
 *
 * Pipeline:
 *   1. read JSON input  -> temp view "table1"
 *   2. parse address3.txt -> RDD[Address2] -> temp view "table2"
 *   3. SELECT/rename columns, LEFT JOIN on ReporterCode = domain_code
 *   4. write result as a single JSON partition
 */
object CSYD_TABLEMETA_WORLD {

  // Field names extracted from each address record.
  // 域名代码 = "domain code", 国家或地区 = "country or region".
  private val DomainCodeKey = "域名代码"
  private val CountryKey = "国家或地区"

  /**
   * Parses one "<REC>" record into a map containing at most the two fields
   * we care about (domain code, country/region).
   *
   * Each record is a run of "<key>=value" fragments; splitting on "<" yields
   * "key>=value" pieces (plus empty fragments from a leading "<", which are
   * dropped). Fragments that do not split into exactly key/value on ">=" are
   * discarded, which also guards against index-out-of-bounds on malformed
   * input. Duplicate keys keep the last occurrence, matching the original
   * mutable-map `+=` behavior.
   */
  private def parseRecord(record: String): Map[String, String] =
    record
      .split("<")
      .iterator
      .filter(_.nonEmpty)
      .map(_.split(">="))
      .collect {
        // Keep only well-formed pairs for the two fields of interest.
        case Array(key, value) if key == DomainCodeKey || key == CountryKey =>
          key -> value
      }
      .toMap

  def main(args: Array[String]): Unit = {
    // NOTE(review): master is hard-coded to local[*]; for cluster runs this
    // should be supplied via spark-submit instead.
    val conf = new SparkConf().setAppName("CSYD_TABLEMETA_WORLD").setMaster("local[*]")
    val spark = SparkSession.builder().config(conf).getOrCreate()
    val sc = spark.sparkContext

    // Former HDFS paths, kept for reference:
    //   hdfs://node01:8020/tmp/InternationalData_Result/HDFSToJson
    //   hdfs://node01:8020/tmp/InternationalData_source/address3.txt
    //   hdfs://node01:8020/tmp/InternationalData_Result/CSYD_TABLEMETA_WORLD/
    // Temporary local paths for the education data run.
    val inputpath_json = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/trans"
    val inputpath_file = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/address3.txt"
    val outpath_sql = "file:/Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/CSYD_TABLEMETA_WORLD/"

    // 1. Load the JSON input as a DataFrame and register it as "table1".
    //    The earlier data set filtered on `Period is null`; the education
    //    data has no Period column, so all rows pass through unchanged.
    val dataframe = spark.read.json(inputpath_json)
    dataframe.createTempView("table")
    spark.sql("select * from table").createTempView("table1")

    // 2. Parse address3.txt (multi-line rec records merged onto one line,
    //    delimited by "<REC>"), keep records that carry BOTH the domain code
    //    and the country/region, and register them as "table2".
    val records: RDD[String] = sc.textFile(inputpath_file)
      .flatMap(_.split("<REC>"))
      .filter(_.nonEmpty)
    val parsed: RDD[Map[String, String]] = records.map(parseRecord)
    // parseRecord only ever emits the two keys of interest, so size == 2
    // means both fields are present.
    val hasBothFields: RDD[Map[String, String]] = parsed.filter(_.size == 2)
    val addressRdd: RDD[Address2] = hasBothFields.map { kv =>
      Address2(kv.getOrElse(DomainCodeKey, ""), kv.getOrElse(CountryKey, ""))
    }
    // De-duplicate identical (domain code, country) pairs.
    val distinctRdd = addressRdd.distinct()
    import spark.implicits._
    val ds: Dataset[Address2] = distinctRdd.toDS()
    ds.createTempView("table2")

    // 3. Select and rename the wanted columns. Output column names (including
    //    the misspelled *_uint / gauncangbanzhuanjizhuantidaima aliases) are
    //    part of the downstream schema and must not be changed here.
    val spark_sql =
      """
        |select
        |tm_time,
        |tm_address,
        |complete_indicator,
        |tm_original_value,
        |tm_original_uint,
        |tm_common_value,
        |tm_common_uint,
        |address_code,
        |tm_fuzzy_address,
        |concat_ws("，",tm_time,tm_address,complete_indicator,tm_original_value) knowledge_element,
        |address_level,
        |inbound_identity,
        |excel_year_code,
        |year_code_chinese_name,
        |year_year,
        |entry_title,
        |quote,
        |gauncangbanzhuanjizhuantidaima,
        |year,
        |address_show,
        |tm_province,
        |valid_tag,
        |rearch_indicator,
        |tm_fuzzy_indicator,
        |concat_ws("，",tm_time,tm_address,address_code,country_or_region,complete_indicator,tm_original_value) tm_knowledge_element
        |from
        |(
        |select
        |country_or_region,
        |concat(Year,"年") tm_time,
        |Reporter tm_address,
        |Indicator complete_indicator,
        |ObservationValue tm_original_value,
        |Unit tm_original_uint,
        |ObservationValue tm_common_value,
        |Unit tm_common_uint,
        |ReporterCode address_code,
        |concat_ws("，",FuzzyReporter,country_or_region) tm_fuzzy_address,
        |cast(1 as int ) address_level,
        |cast(1 as int ) inbound_identity,
        |"N2020WBDATA" excel_year_code,
        |"WordBank" year_code_chinese_name,
        |Year year_year,
        |SourceName entry_title,
        |concat_ws(">>","WordBank",Year,SourceName,Reporter,Indicator,Year,ObservationValue) quote,
        |ReporterCode gauncangbanzhuanjizhuantidaima,
        |Year year,
        |Reporter address_show,
        |"世界范围" tm_province,
        |cast(0 as int) valid_tag,
        |Indicator rearch_indicator,
        |FuzzyIndicator tm_fuzzy_indicator
        |from
        |table1
        |left join
        |table2
        |on table1.ReporterCode=table2.domain_code
        |)t
        |""".stripMargin
    val result: DataFrame = spark.sql(spark_sql)

    // 4. Write the result as JSON; repartition(1) yields a single output file.
    // NOTE(review): write.json fails if the output directory already exists —
    // the previous ifDirExistsDelete call was commented out; consider
    // .mode(SaveMode.Overwrite) if re-runs are expected.
    result.repartition(1).write.json(outpath_sql)
    spark.stop()
  }
}

