import com.alibaba.fastjson.{JSON, JSONObject}
import com.o2o.utils.Iargs
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.sql.SparkSession
/**
  * Re-indexes dingdongmc product data from OBS ORC output into Elasticsearch.
  *
  * @author o2o-rd-0008
  * @since  2020/6/5
  */
object CheckDDMCObsData {

  /**
    * Reads the month's finalized ORC product data from OBS (via the s3a
    * connector) and writes every record to Elasticsearch, using `good_id`
    * as the document id so re-runs upsert instead of duplicating documents.
    *
    * @param args unused; batch parameters are the constants below.
    */
  def main(args: Array[String]): Unit = {

    // NOTE(review): the ES endpoint and credentials are hard-coded here
    // ("elastic"/"changeme"). Move them into configuration (e.g. Iargs)
    // rather than committing secrets to source control.
    // NOTE(review): "cluster.name" is not an es-spark setting (those use the
    // "es." prefix) — this config entry is most likely a no-op; confirm.
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3-compatible) credentials for the s3a filesystem.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    // Batch parameters: platform name and the month being indexed.
    // These are constants per run, so vals — not vars.
    val platform = "dingdongmc"
    val year = 2020
    val month = 9

    // ES index/type and the ORC input path, derived from the batch parameters
    // instead of repeating "2020"/"9"/"dingdongmc" as literals, so a single
    // edit to the constants above updates every location consistently.
    // (With the current values these interpolate to exactly the original
    // hard-coded strings.)
    val index = s"247_${year}_${platform}/${platform}_${year}_${month}"
    val goodsPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_10099_final/"

    import org.elasticsearch.spark._
    // Read the finalized ORC output, convert each row to a fastjson
    // JSONObject (a java.util.Map, which es-spark can serialize), and index
    // it. saveToEs returns Unit, so its result is deliberately not bound.
    spark.read.orc(goodsPath)
      .toJSON
      .rdd
      .map(line => JSON.parseObject(line))
      .saveToEs(index, Map("es.mapping.id" -> "good_id"))

    sc.stop()
  }
}
