package tarns_result.old_version

import common.Tools.ifDirExistsDelete
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

import scala.collection.mutable
/**
 * Spark batch job: joins World Bank indicator JSON data with a country/region
 * reference file (address3.txt) and writes the merged records out as JSON.
 *
 * Pipeline:
 *   1. Load the JSON dump into temp view "table1".
 *   2. Parse the <REC>-delimited reference file twice, building
 *      "table2" (domain code -> country/region) and
 *      "table3" (domain code -> continent).
 *   3. Run one SQL statement joining the three views and write the result.
 */
object YEARBOOKTABLEDATA2008_FORMAL_GUOJI {

  /**
   * Parses <REC>-delimited record lines into key/value maps, keeping only
   * records that contain BOTH requested fields.
   *
   * Each record is a run of "<key>=value" fragments: we split on '<', split
   * each fragment on ">=", drop malformed fragments (no value part, which
   * would otherwise cause an index-out-of-bounds), and retain only the two
   * fields of interest.
   *
   * @param records one record per element (already split on "<REC>")
   * @param keyA    first required field name
   * @param keyB    second required field name
   * @return maps that contain exactly the two requested keys
   */
  private def extractFields(records: RDD[String], keyA: String, keyB: String): RDD[Map[String, String]] = {
    records.map { record =>
      val fragments = record.split("<").filter(_.nonEmpty)           // each element is "key>=value"
      val pairs = fragments.map(_.split(">=")).filter(_.length == 2) // drop fragments without a value
      val wanted = new mutable.HashMap[String, String]()
      pairs.foreach { kv =>
        if (kv(0) == keyA || kv(0) == keyB) wanted += (kv(0) -> kv(1))
      }
      wanted.toMap
    }.filter(_.size == 2) // keep only records carrying both fields
  }

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("YEARBOOKTABLEDATA2008_FORMAL_GUOJI").setMaster("local[*]")
    val spark = SparkSession.builder().config(conf).getOrCreate()
    val sc = spark.sparkContext

    // Local test paths. HDFS equivalents kept for reference:
    //   hdfs://node01:8020/tmp/InternationalData_Result/HDFSToJson
    //   hdfs://node01:8020/tmp/InternationalData_source/address3.txt
    //   hdfs://node01:8020/tmp/InternationalData_Result/YEARBOOKTABLEDATA2008_FORMAL_GUOJI/
    val inputpath_json = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/trans"
    val inputpath_file = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/address3.txt"
    val outpath_sql = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/jiaoyu/YEARBOOKTABLEDATA2008_FORMAL_GUOJI/"
    // ifDirExistsDelete(outpath_sql)  // NOTE(review): left disabled as in original; the write fails if the dir exists

    // 1. Load the JSON input as a DataFrame and expose it as view "table1".
    //    (The intermediate "table" view mirrors the original two-step setup;
    //    a Period-filter variant was previously considered and dropped.)
    spark.read.json(inputpath_json).createTempView("table")
    spark.sql("select * from table").createTempView("table1")

    // 2. Parse address3.txt. The file holds multi-line .rec data merged onto
    //    single lines; each logical record is introduced by a "<REC>" marker.
    val records: RDD[String] = sc.textFile(inputpath_file)
      .flatMap(_.split("<REC>"))
      .filter(_.nonEmpty)

    import spark.implicits._

    // table2: records with both "域名代码" (domain code) and "国家或地区"
    // (country or region), deduplicated. distinct() works because Address2
    // is a case class with structural equality.
    extractFields(records, "域名代码", "国家或地区")
      .map(m => Address2(m.getOrElse("域名代码", ""), m.getOrElse("国家或地区", "")))
      .distinct()
      .toDS()
      .createTempView("table2")

    // table3: records with both "域名代码" (domain code) and "所属大洲"
    // (continent), deduplicated the same way.
    extractFields(records, "域名代码", "所属大洲")
      .map(m => Address(m.getOrElse("域名代码", ""), m.getOrElse("所属大洲", "")))
      .distinct()
      .toDS()
      .createTempView("table3")

    // 3. Select and rename the wanted columns from table1 joined with the two
    //    reference views. An earlier draft of this SQL referenced columns that
    //    do not exist in the source data; they were removed here.
    val spark_sql =
      """
        |select
        |my_year,
        |address,
        |format_indicator,
        |format_value,
        |format_unit,
        |original_value,
        |original_unit,
        |concat_ws("，",my_year,address,format_indicator,format_value) knowledge_element,
        |year_chinese,
        |year_year,
        |address_code,
        |address_level,
        |fuzzy_address,
        |search_indicator,
        |fuzzy_indicator,
        |inbound_indentify,
        |year_code,
        |valid_tag,
        |year_name,
        |indicator_sort,
        |belongs_continent,
        |file_name,
        |item_name,
        |tm_fuzzy_indicator,
        |concat_ws("€",my_year,country_flag_address_code,search_indicator) REPORTGROUPER,
        |show_value,
        |show_unit,
        |file_name_x,
        |year_chinese_name,
        |year_year_x,
        |item_title_x,
        |code_x,
        |inbound_indentify_x,
        |country_flag_address_code
        |from
        |(
        |select
        |concat(Year,"年") my_year,
        |Reporter address,
        |Indicator format_indicator,
        |ObservationValue format_value,
        |Unit format_unit,
        |ObservationValue original_value,
        |Unit  original_unit,
        |"WorldBank" year_chinese,
        |Year year_year,
        |ReporterCode address_code,
        |cast(1 as int) address_level,
        |concat_ws("，",FuzzyReporter,country_or_region) fuzzy_address,
        |Indicator search_indicator,
        |FuzzyIndicator fuzzy_indicator,
        |cast(1 as int) inbound_indentify,
        |"N2020WBDATA" year_code,
        |cast(0 as int) valid_tag,
        |SourceName year_name,
        |cast(0 as int) indicator_sort,
        |belongs_continent,
        |"N" file_name,
        |Reporter item_name,
        |Indicator tm_fuzzy_indicator,
        |ObservationValue show_value,
        |Unit show_unit,
        |SourceName file_name_x,
        |"WorldBank" year_chinese_name,
        |Year year_year_x,
        |Reporter item_title_x,
        |"N2020WBDATA" code_x,
        |cast(1 as int) inbound_indentify_x,
        |ReporterCode country_flag_address_code
        |from
        |table1
        |left join table2
        |on table1.ReporterCode=table2.domain_code
        |left join table3
        |on table1.ReporterCode=table3.domain_code
        |)t
        |""".stripMargin

    val result: DataFrame = spark.sql(spark_sql)

    // 4. Write the result as a single JSON file (repartition(1) keeps one
    //    output part; bump this for larger data / HDFS runs).
    result.repartition(1).write.json(outpath_sql)
    spark.stop()
  }
}
