package tarns_result.others.classify

import common.Tools.{getSpark, ifDirExistsDelete}
import org.apache.spark.sql.DataFrame

object NAVIFREQ_ALL_CLASSIFY_WORLDWEB {

  /**
   * Deduplicates worldwide-web indicator records by `Indicator` and writes the
   * result back to HDFS as a single JSON file.
   *
   * Keeps exactly one row per Indicator (the one with the smallest Unit under
   * the window's ordering) using a row_number window function.
   */
  def main(args: Array[String]): Unit = {
    val spark = getSpark("NAVIFREQ_ALL_CLASSIFY_WORLDWEB")

    // 1. Load the JSON output of the previous stage into a DataFrame.
    //    Read the whole directory rather than a hard-coded "part-00000" file:
    //    part-file names are not guaranteed, and a directory read picks up
    //    every part file while ignoring _SUCCESS markers.
    val inputPathJson = "hdfs://node01:8020/tmp/InternationalData_Result/HDFSToJson"
    val dataframe = spark.read.json(inputPathJson)

    // 2. Register a temp view for SQL access. createOrReplaceTempView is
    //    idempotent, unlike createTempView which throws AnalysisException if
    //    the view already exists in this session. The view is named
    //    "source_table" because bare "table" is a reserved SQL keyword.
    dataframe.createOrReplaceTempView("source_table")

    // 3. Select the wanted columns, keeping one row per Indicator via a
    //    row_number window partitioned by Indicator and ordered by Unit.
    val dedupSql =
      """
        |select
        |  Indicator,
        |  IndicatorCategory,
        |  Unit,
        |  IndicatorLength,
        |  WebCode,
        |  WebName,
        |  SourceCode,
        |  SourceName
        |from (
        |  select
        |    Indicator,
        |    IndicatorCategory,
        |    Unit,
        |    IndicatorLength,
        |    WebCode,
        |    WebName,
        |    SourceCode,
        |    SourceName,
        |    row_number() over (partition by Indicator order by Unit) num
        |  from source_table
        |) t
        |where num = 1
        |""".stripMargin
    val result: DataFrame = spark.sql(dedupSql)

    // 4. Write the deduplicated rows as JSON to HDFS, replacing any previous
    //    output. repartition(1) collapses to a single output file.
    val outputPath = "hdfs://node01:8020/tmp/InternationalData_Result/NAVIFREQ_ALL_CLASSIFY_WORLDWEB"
    ifDirExistsDelete(outputPath)
    result.repartition(1).write.json(outputPath)

    spark.stop()
  }
}

