package com.o2o.cleaning.month.platform.ebusiness_plat.wangyiyanxuan

import com.alibaba.fastjson.JSON
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.{sparkContextFunctions, sparkRDDFunctions}

/**
 * One-off enrichment job: reads product documents for selected `subCategoryId`
 * values from an Elasticsearch index, joins them against a local CSV mapping
 * (categoryId -> standard 4-level category ids), replaces the four category
 * columns with the mapped values, and writes the documents back to the same
 * index (upsert keyed on `good_id`).
 */
object join_newCate_To_ES2 {

  def main(args: Array[String]): Unit = {
    // Local-mode session configured for the elasticsearch-hadoop connector.
    val spark = SparkSession.builder().appName("Other")
      .master("local[*]")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .getOrCreate()

    // SECURITY: hardcoded object-storage credentials committed to source.
    // Move these to environment variables / a credentials provider and rotate the keys.
    val hadoopConf = spark.sparkContext.hadoopConfiguration
    hadoopConf.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    hadoopConf.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    hadoopConf.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")

    // Target index/type; previous runs of this job used the indices below.
    val index = "2021_wangyiyanxuan/wangyiyanxuan_2021_4"
//    val index = "2021_txd/txd_2021_2"
//    val index = "2021_secoo/secoo_2021_4"

    // Pull only documents whose subCategoryId matches one of the four ids.
    // NOTE(review): es-hadoop scrolls through all hits regardless of "size";
    // the "size": 0 here is ignored by the connector — confirm before relying on it.
    val values = spark.sparkContext.esJsonRDD(index,
      """
        |{
        |  "size": 0,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "109295002"
        |          }
        |        },{
        |          "match_phrase": {
        |            "subCategoryId": "109295009"
        |          }
        |        },{
        |          "match_phrase": {
        |            "subCategoryId": "109295008"
        |          }
        |        },{
        |          "match_phrase": {
        |            "subCategoryId": "109291002"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
              """.stripMargin).values
    // createOrReplaceTempView replaces registerTempTable (deprecated since Spark 2.0).
    spark.read.json(values).createOrReplaceTempView("esData")

//    Alternative extraction: documents missing firstCategoryId entirely.
//    val values = spark.sparkContext.esJsonRDD(index,
//      """
//        |{
//        |  "query": {
//        |    "bool": {
//        |      "must_not": [
//        |        {
//        |         "bool": {
//        |           "filter": {
//        |             "exists": {
//        |               "field": "firstCategoryId"
//        |             }
//        |           }
//        |         }
//        |        }
//        |      ]
//        |    }
//        |  }
//        |}
//              """.stripMargin).values
//    spark.read.json(values).createOrReplaceTempView("esData")

    // Load the category mapping CSV (categoryId plus the standard 4-level ids),
    // de-duplicated on the join key.
    spark.read.option("header", true)
      .csv("D:\\test\\网易严选分类_202104.csv")
      .dropDuplicates("categoryId")
      .createOrReplaceTempView("cateTable")

    // Inner-join semantics: the `where b.categoryId is not null` filter turns the
    // left join into an inner join, so only documents with a mapping are updated.
    val frame = spark.sql(
      """
        |select a.*,
        |b.firstCategoryId as first,
        | b.secondCategoryId as second,
        | b.thirdCategoryId as third,
        | b.fourthCategoryId as fourth
        | from esData as a
        |
        |left join cateTable as b on
        |a.subCategoryId = b.categoryId
        | where b.categoryId is not null
        |""".stripMargin)
      // Drop the stale category columns and rename the mapped ones into their place.
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId")
      .withColumnRenamed("first", "firstCategoryId")
      .withColumnRenamed("second", "secondCategoryId")
      .withColumnRenamed("third", "thirdCategoryId")
      .withColumnRenamed("fourth", "fourthCategoryId")

    // Convert each row to a fastjson object so saveToEs can extract the mapping id.
    val docs = frame.toJSON.rdd.map(JSON.parseObject)

    // Write back to the same index; "es.mapping.id" -> "good_id" makes this an
    // upsert keyed by good_id rather than an append of new documents.
    docs.saveToEs(index, Map("es.mapping.id" -> "good_id"))

    spark.stop()
  }
}
