package com.o2o.cleaning.month.platform.ebusiness_plat.intime

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.elasticsearch.spark._

object Intime_es29_change {

  /**
    * Monthly cleaning job for the "intime" e-commerce platform.
    *
    * Pipeline:
    *   1. Read the month's raw JSON documents from Elasticsearch.
    *   2. Back up the raw data as ORC to OBS (S3A).
    *   3. Re-derive the four category-level ids by joining against the
    *      category dimension table (CSV on OBS).
    *   4. Write the enriched result as ORC and write it back to the same
    *      Elasticsearch index, keyed by `good_id`.
    *
    * @param args optional overrides: args(0)=year, args(1)=month,
    *             args(2)=platform. Defaults preserve the original
    *             hard-coded values (2020 / 8 / intime).
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("Intime_es29")
//      .master("local[*]")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      // SECURITY NOTE(review): credentials are hard-coded in source; they
      // should be supplied via spark-submit --conf or a secured config store.
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .config("es.batch.write.retry.count", "10")
      .config("es.batch.write.retry.wait", "60")
      .config("cluster.name", "O2OElastic")
      .getOrCreate()
    val sc = spark.sparkContext

    // OBS (S3A) access for the ORC outputs and the dimension-table input.
    // SECURITY NOTE(review): hard-coded keys — move to cluster configuration.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")

    /**
      * Job parameters. Overridable from the command line; defaults keep the
      * original behaviour (year=2020, month=8, platform=intime).
      */
    val year     = args.lift(0).getOrElse("2020")
    val month    = args.lift(1).getOrElse("8")
    val platform = args.lift(2).getOrElse("intime")
    val index247 = s"${year}_${platform}/${platform}_${year}_${month}"
    // ORC backup of the raw documents, written before any modification.
    val goodPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_bak/"
    // ORC output of the category-enriched result.
    val goodfinalPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_mod_final/"
    // Category dimension table. BUGFIX: this was hard-coded to intime/2020/8
    // even though year/month/platform variables existed, so running another
    // month would have joined against the wrong table. Now derived from the
    // same parameters (default values yield the identical original path).
    val catePath = s"s3a://o2o-dimension-table/category_table/categoryFile_${platform}/${year}/${month}/*"

    // Pull every document for the month from ES; parse and re-serialise each
    // one to normalise the JSON text before Spark infers a schema from it.
    val rawJson: RDD[String] = sc.esJsonRDD(index247).values.map { line =>
      JSON.parseObject(line).toString
    }

    val frame: DataFrame = spark.read.json(rawJson)

    // Back up the raw data before re-deriving categories.
    frame.write.orc(goodPath)

    // Load the category mapping (comma-delimited CSV with header row).
    val cateDF: DataFrame = spark.read.option("delimiter", ",").option("header", true).csv(catePath)

    // registerTempTable is deprecated since Spark 2.0; use the view API.
    cateDF.createOrReplaceTempView("cateDF")

    // Derive the four category levels from the standard id:
    // first = 5 chars, second = 7 chars, third = the full id,
    // fourth = the id with a "99" suffix.
    spark.sql(
      """
        |select
        |substr(standId,0,5) as firstCategoryId,
        |substr(standId,0,7) as secondCategoryId,
        |standId as thirdCategoryId,
        |concat(standId,'99') as fourthCategoryId,
        |*
        |from
        |cateDF
      """.stripMargin).createOrReplaceTempView("cateTab")

    // Drop any pre-existing category columns so the join below is the single
    // source of truth for all four category ids.
    val stripped: DataFrame = frame
      .drop("firstCategoryId")
      .drop("secondCategoryId")
      .drop("thirdCategoryId")
      .drop("fourthCategoryId")

    stripped.createOrReplaceTempView("tab")

    // Re-attach the category ids; rows with no match in the dimension table
    // fall back to the "unknown" sentinel ids (10099 / 1009999 / ...).
    val res = spark.sql(
      """
        |select
        |a.*,
        |case when b.firstCategoryId is not null then b.firstCategoryId else '10099' end firstCategoryId,
        |case when b.secondCategoryId is not null then b.secondCategoryId else '1009999' end secondCategoryId,
        |case when b.thirdCategoryId is not null then b.thirdCategoryId else '100999999' end thirdCategoryId,
        |case when b.fourthCategoryId is not null then b.fourthCategoryId else '10099999999' end fourthCategoryId
        |from
        |tab a
        |left join
        |cateTab b
        |on a.categoryId=b.categoryId
      """.stripMargin)

    res.write.orc(goodfinalPath)

    // Write back to the same ES index, upserting on good_id. The per-row
    // JSON.parseObject works around inconsistent types in the `flavors`
    // field; JSONObject implements java.util.Map, which es-hadoop can
    // serialise. (The original bound this Unit result to an unused `df`.)
    res.toJSON.rdd
      .map(line => JSON.parseObject(line))
      .saveToEs(index247, Map("es.mapping.id" -> "good_id"))

    spark.stop()
  }

}
