package taobao

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2021/2/9 17:20
  * @ Description: Normalises the "evaluates" field of Taobao product documents,
  *                enriches them with address dimension tables, and writes the
  *                result back to Elasticsearch.
  */
object AddressUpdate {

  /**
    * ETL job: reads Taobao product documents from the `2021_taobao/taobao_2021_2`
    * Elasticsearch index, normalises the "evaluates" field, joins in city-grade,
    * economic-division and administrative-region dimension tables from OBS, and
    * writes the enriched documents back to the same index (upsert on `good_id`).
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("getesdata")
//                      .master("local[*]")
      .config("es.nodes", "192.168.1.29")
      //                          .config("es.nodes", "192.168.2.247")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      // SECURITY: Elasticsearch credentials are hard-coded. Move them to
      // spark-submit --conf / environment variables before production use.
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")
    // SECURITY: OBS access keys are hard-coded; externalise these as well.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    import org.elasticsearch.spark._

    // Pull the raw JSON documents from ES (.values drops the document ids).
    val da: RDD[String] = sc.esJsonRDD("2021_taobao/taobao_2021_2").values

    // Ensure every document carries an "evaluates" field serialised as a
    // string; documents without one get the "-1" sentinel.
    val resultData: RDD[String] = da.map { line =>
      val doc: JSONObject = JSON.parseObject(line)
      doc.put("evaluates", doc.getOrDefault("evaluates", "-1").toString)
      doc.toString
    }

    // Stage the normalised data as ORC so the join below reads a stable schema.
    spark.read.json(resultData)
      .write.orc("s3a://o2o-dataproces-group/zsc/product/taobao/2021/2/getesData_final_orc/")

//    val frame: DataFrame = spark.read.option("header",true).csv("s3a://o2o-dimension-table/address_table/address_table_2021/address_mapping_standard/2021_2/").cache()
    val frame: DataFrame = spark.read
      .orc("s3a://o2o-dataproces-group/zsc/product/taobao/2021/2/getesData_final_orc/").cache()

    val cityGrade: DataFrame = spark.read.option("header", true)
      .csv("s3a://o2o-dimension-table/address_table/address_2021/city_grade/").cache()

    val economicDivision: DataFrame = spark.read.option("header", true)
      .csv("s3a://o2o-dimension-table/address_table/address_2021/economic_division/").cache()

    val qidayu: DataFrame = spark.read
      .json("s3a://o2o-dimension-table/address_table/address_table_2020/12/address_platform/suning_address_2020_12_new/").cache()

    // registerTempTable is deprecated since Spark 2.0; use createOrReplaceTempView.
    frame.createOrReplaceTempView("tab")
    cityGrade.createOrReplaceTempView("citytab")
    economicDivision.createOrReplaceTempView("economictab")
    // NOTE(review): the join below is on province only, but deduplication is on
    // (province, administrative_region) — provinces with several regions can
    // still fan out rows. Confirm this is intended.
    qidayu.dropDuplicates("province", "administrative_region").createOrReplaceTempView("qidayu")

    // Attach the dimension columns, then swap them in place of the originals.
    val res = spark.sql(
      """
        |select
        |t1.*,
        |t2.city_grade as city_grade_new,
        |t3.economic_division as economic_division_new,
        |t4.administrative_region as administrative_region_new
        |from
        |tab t1
        |left join
        |citytab t2
        |on t1.province=t2.province and t1.city=t2.city
        |left join
        |economictab t3
        |on t1.province = t3.province
        |left join
        |qidayu t4
        |on t1.province = t4.province
      """.stripMargin)
      .drop("city_grade", "economic_division", "administrative_region")
      .withColumnRenamed("city_grade_new", "city_grade")
      .withColumnRenamed("economic_division_new", "economic_division")
      .withColumnRenamed("administrative_region_new", "administrative_region")

    // Restore "evaluates" (flattened to a string by toJSON) to a nested JSON
    // object; the "-1" sentinel becomes {"fuyi":"-1"}.
    res.toJSON.rdd.map { line =>
      val doc: JSONObject = JSON.parseObject(line)
      // Defensive default, consistent with the normalisation pass above
      // (the original .get(...).toString would NPE on a missing key).
      val evaluates = doc.getOrDefault("evaluates", "-1").toString
      val nested: JSONObject =
        if (evaluates == "-1") {
          val sentinel = new JSONObject()
          sentinel.put("fuyi", "-1")
          sentinel
        } else {
          JSON.parseObject(evaluates)
        }
      doc.put("evaluates", nested)
      doc
    }.saveToEs("2021_taobao/taobao_2021_2", Map("es.mapping.id" -> "good_id"))
  }

}
