package com.o2o.cleaning.month.platform.ebusiness_plat.ddmc

import com.alibaba.fastjson.JSON
import com.o2o.cleaning.month.platform.ebusiness_plat.ddmc.DingDongMaiCai.{addressMatch, addressPath}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object DingdongMc_es247_deal_address {

  /** Enrichment columns stripped from the raw ES documents before the
    * address-match step regenerates them. Collapsed from 26 chained
    * single-column `.drop` calls into one variadic drop.
    */
  private val ColumnsToDrop: Seq[String] = Seq(
    "district_origin", "town", "address", "name", "latitude", "longitude",
    "registration_institution", "administrative_region", "city",
    "city_grade", "city_origin", "economic_division", "if_city", "if_district",
    "if_state_level_new_areas", "poor_counties", "province", "rural_demonstration_counties",
    "rural_ecommerce", "the_belt_and_road_city",
    "the_belt_and_road_province", "the_yangtze_river_economic_zone_city",
    "the_yangtze_river_economic_zone_province", "urban_agglomerations",
    "aedzId", "regional_ID", "district"
  )

  /** Monthly re-processing job: reads one month of documents from the
    * `247_<year>_<platform>` Elasticsearch index, drops stale enrichment
    * columns, re-runs address matching, archives the result to OBS as ORC,
    * and upserts the enriched rows back into the same ES index keyed by
    * `good_id`.
    */
  def main(args: Array[String]): Unit = {

    // NOTE(review): ES auth and S3 credentials are hard-coded in source.
    // Move them to configuration / environment before sharing this code.
    val spark = SparkSession.builder()
      .appName("SparkTest")
      .master("local[*]")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .config("es.batch.write.retry.count", "10")
      .config("es.batch.write.retry.wait", "60")
      .config("cluster.name", "O2OElastic")
      .getOrCreate()
    val sc = spark.sparkContext

    // S3A credentials for the Huawei Cloud OBS endpoint used by the ORC write below.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")

    /** *
      * Parameters that change per monthly run.
      * (vals — never reassigned; the original used `var`.)
      */
    val year = "2020"
    val month = "10"
    val platform = "dingdongmc"
    // Index/type pair: 247_<year>_<platform>/<platform>_<year>_<month>
    val index247 = s"247_${year}_${platform}/${platform}_${year}_${month}"

    import org.elasticsearch.spark._

    // Raw JSON documents for the month, straight out of Elasticsearch.
    val values: RDD[String] = sc.esJsonRDD(index247).values

    // Parse to a DataFrame and strip stale enrichment columns in one pass.
    // The original relied on leading-dot continuation across blank lines to
    // attach the drop chain to this expression — easy to break accidentally.
    val frame1: DataFrame = spark.read.json(values).drop(ColumnsToDrop: _*)

    //good path
    //    var goodPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good/"
    //    val frame: DataFrame = spark.read.orc(goodPath)

    println("======匹配地址=======")
    // Re-run address matching against the reference address file; cached
    // because the result is consumed twice (ORC archive + ES upsert).
    val addressDF: DataFrame = spark.read.json(addressPath)
    val addressResultDF: DataFrame = addressMatch(spark, frame1, addressDF).cache()

    // Archive the enriched rows to OBS as ORC.
    addressResultDF.write.orc(s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_final_modify/")

    // Upsert back into the same ES index keyed by good_id. Each row is
    // re-parsed through fastjson because the `flavors` field has
    // inconsistent types in the raw JSON. saveToEs returns Unit, so nothing
    // is bound (the original assigned the Unit result to an unused `val df`).
    addressResultDF.toJSON.rdd
      .map(line => JSON.parseObject(line))
      .saveToEs(index247, Map("es.mapping.id" -> "good_id"))

    // Release resources once both sinks have been written.
    addressResultDF.unpersist()
    spark.stop()
  }

}
