import com.o2o.utils.Iargs
import org.apache.spark.sql.SparkSession
/**
  * @ Author: o2o-rd-0008
  * @ Date:   2020/6/5 16:23
  * @ Description: One-off driver that backs up a 2019 "618" activity ES index
  *                to OBS and (in the commented-out pipeline) joins category,
  *                address and brand dimensions before writing results back.
  */
object UpdateEsxsp {

  /**
    * Entry point. Builds a local SparkSession configured for the
    * Elasticsearch cluster at 192.168.1.29 and for OBS via the S3A
    * connector, then (currently) only defines the per-platform index
    * and output paths — the actual backup/join pipeline is kept below
    * as commented-out code from previous runs.
    *
    * @param args unused; index/paths are selected by editing the constants below
    */
  def main(args: Array[String]): Unit = {

    // NOTE(review): ES credentials are hard-coded in plain text
    // ("elastic"/"changeme") — move them to configuration or a secrets
    // store before this runs anywhere but a dev box.
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]") // hard-coded local master; remove for cluster submission
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3A) credentials come from the shared Iargs constants.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    // Guard the (currently inert) job body so the session is always
    // stopped, even if a re-enabled pipeline below throws.
    try {
      // ES index to process — switch per platform (previous runs kept for reference).
//    val index = "wangyiyanxuan_2019_activity/wangyiyanxuan_2019_618"
//    val index = "jumei_2019_activity/jumei_2019_618"
      val index = "miya_2019_activity/miya_2019_618"

      // Raw-backup target on OBS for the chosen index.
      val bakPath = s"s3a://o2o-dataproces-group/zsc/2019618/xpt/${index}"
      // Final ORC output path — must match the platform chosen above.
//    val resultPath = "s3a://dws-data/g_data/618/20190618_2020_co_yoy/wangyiyanxuan/"
//    val resultPath = "s3a://dws-data/g_data/618/20190618_2020_co_yoy/jumei/"
      val resultPath = "s3a://dws-data/g_data/618/20190618_2020_co_yoy/miya/"

      // --- Variant 1 (disabled): plain dump of the ES index to ORC ---
//    import org.elasticsearch.spark._
//    val data1 = sc.esJsonRDD(s"${index}").values
//    spark.read.json(data1).write.orc(bakPath)

      // --- Variant 2 (disabled): miya pipeline — drop address/category/brand
      //     columns, then re-join category, address and brand dimensions ---
  /*  val data1 = sc.esJsonRDD(s"${index}").values
//    data1.saveAsTextFile(s"${bakPath}")

    val sourceDF: DataFrame = spark.read.json(data1).drop("address").drop("administrative_region").drop("city").drop("city_grade").drop("city_origin")
      .drop("district").drop("economic_division").drop("district_origin").drop("if_city").drop("if_district").drop("if_state_level_new_areas").drop("kaifaqu")
      .drop("name").drop("poor_counties").drop("province").drop("regional_ID").drop("rural_demonstration_counties").drop("rural_ecommerce").drop("the_belt_and_road_city")
      .drop("the_yangtze_river_economic_zone_city").drop("the_belt_and_road_province").drop("the_yangtze_river_economic_zone_province").drop("urban_agglomerations")

      .drop("secondCategoryId").drop("thirdCategoryId").drop("fourthCategoryId").drop("firstCategoryId").drop("customCategoryId")

    // Join category dimension
    val frame = miyaCate(spark, sourceDF)
    // Join address dimension
    val frame1 = miyaAddress(spark, frame)
    // Join brand dimension
    val brand = new brand_join_res
    // Path of the old brand table to read
    val brandTableOld = spark.read.json(s"s3a://o2o-dataproces-group/li_yinchao/Table/2020/6_1/miya/")
    brand.brandJoinResult(frame1,resultPath,brandTableOld,s"s3a://o2o-dataproces-group/li_yinchao/Table/2020/618_test/miya06/",
      s"s3a://o2o-dataproces-group/li_yinchao/Table/newAddBrand/2020/618_test/miya06/",year.toInt,month.toInt,platform,spark)*/

      /** Stage 2: join categories — swbfirstId, swbfirstName, firstCategoryId,
        * secondCategoryId, thirdCategoryId, fourthCategoryId */

      // --- Variant 3 (disabled): wangyiyanxuan pipeline with CSV category table ---
    /*val data1 = sc.esJsonRDD(s"${index}").values

    val frame: DataFrame = spark.read.json(data1).drop("address").drop("administrative_region").drop("city").drop("city_grade").drop("city_origin")
      .drop("district").drop("economic_division").drop("district_origin").drop("if_city").drop("if_district").drop("if_state_level_new_areas").drop("kaifaqu")
      .drop("name").drop("poor_counties").drop("province").drop("regional_ID").drop("rural_demonstration_counties").drop("rural_ecommerce").drop("the_belt_and_road_city")
      .drop("the_yangtze_river_economic_zone_city").drop("the_belt_and_road_province").drop("the_yangtze_river_economic_zone_province").drop("urban_agglomerations")

      .drop("secondCategoryId").drop("thirdCategoryId").drop("fourthCategoryId").drop("firstCategoryId").drop("customCategoryId")
      .drop("platformName").drop("platformId").drop("platformName_spelling").drop("brandValueId").drop("brandName").drop("brandName_cn").drop("brandName_en")
      .drop("brand_type").drop("timeStamp").drop("").drop("")

    // Category table path
    val catePath = s"s3a://o2o-dimension-table/category_table/wangyiyanxuan_cate/wangyiyanxuan_cate_202006"

    val cateDF: DataFrame = spark.read.option("header", true).option("delimiter", ",").csv(catePath)
      .selectExpr("categoryId", "swbfirstId", "swbfirstName", "firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId").dropDuplicates("categoryId")

    val dataCate: DataFrame = WYYXRelation.parseCate(spark: SparkSession, frame: DataFrame, cateDF: DataFrame)

    val dataAddrDF: DataFrame = WYYXRelation.parseAddress(spark, dataCate, 1560787200)

    dataAddrDF.write.orc(resultPath)*/

      // --- Variant 4 (disabled): inspect the ORC result ---
      // NOTE(review): registerTempTable is deprecated; use createOrReplaceTempView
      // if this block is ever re-enabled.
   /* spark.read.orc(resultPath).registerTempTable("tab")

    spark.sql(
      """
        |
        |select
        |*
        |from
        |tab
      """.stripMargin).show(10)*/

      // --- Variant 5 (disabled): trim columns and write the ES payload back ---
//    val frame: DataFrame = spark.read.orc(resultPath).drop("state_originCountryName").drop("swb_firstcategoryId").drop("swb_firstcategoryName").drop("swb_products")
//    frame.write.orc("s3a://dws-data/g_data/618/20190618_2020_co_yoy/miya_618/")
   /* frame.toJSON.rdd.map(line=>{
      val nObject: JSONObject = JSON.parseObject(line)
      nObject
    }).saveToEs(s"${index}",
        Map("es.mapping.id" -> "good_id", "es.nodes" -> s"192.168.1.29",
          "es.port" -> "9200",
          "cluster.name" -> "Es-OTO-Data"))*/
    } finally {
      // spark.stop() also stops the underlying SparkContext.
      spark.stop()
    }
  }
}
