package com.o2o.regularservice.plat10099

import com.alibaba.fastjson.JSON
import org.elasticsearch.spark._
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * @author chenlixiu
 * @version
 * @since JDK 1.8
 */
object UpdateES_10099_sevice {
  // NOTE(review): object name has a typo ("sevice" -> "service") but is kept
  // unchanged because external callers may reference it by this name.

  val clusterName = "hdfs://192.168.2.188:9000"

  // SECURITY(review): Elasticsearch and OBS/S3 credentials are hard-coded
  // below. They should be moved to configuration / environment variables
  // before this code is shared or deployed.
  val spark = SparkSession.builder()
    .appName("SparkTest")
    .master("local[*]")
    .config("es.nodes", "192.168.2.247")
    .config("es.port", "9200")
    .config("cluster.name","O2OElastic")
    .config("es.net.http.auth.user", "elastic")
    .config("es.net.http.auth.pass", "changeme")
    .getOrCreate()

  val sc = spark.sparkContext
  // Huawei OBS accessed through the S3A connector.
  sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
  sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
  sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
  val sqlcontext = spark.sqlContext
  sc.setLogLevel("ERROR")
  //=================================================

  // Job parameters. `year` stays a `var` to preserve the existing public
  // mutable field (callers may assign it before running the job).
  var year = "2020"
  val month = "6"
  // Supported platform values: "mttg" (Meituan group-buy), "dzdp" (Dianping).
  val platform = "mttg"

  // Target ES index, e.g. "2020_mttg_6".
  val index = s"${year}_mttg_${month}"  //2019_mttg_
//  val index = s"2019_dazhongdp_${month}"  //2019_mttg_

  // Local working paths for the category-matching round trip.
  val sourcePath_10099 = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}"
  val pipei_Path = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}\\${platform}.csv"
  val gai_Path = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}\\gai"

  /**
    * Entry point. Pulls the records whose firstCategoryId is 10099 from ES,
    * joins them against the manually matched category CSV, and writes the
    * re-categorized documents back to the same ES index.
    */
  def main(args: Array[String]): Unit = {
    // Step 0: fetch the 10099-category documents from ES.
    val frame = extractFromEs()
//    TiQu10099(frame)  // Step 1 (manual): dump distinct categories locally for matching.

    // Step 2: join the matched categories back and update ES.
    val frame2 = cate_10099(frame)
    laidUpEs(frame2)
  }

  /**
    * 1. Query ES for all documents in `index` whose firstCategoryId is 10099
    * and load them into a DataFrame (JSON is round-tripped through fastjson
    * before `spark.read.json` so each row is a normalized JSON string).
    *
    * @return DataFrame of the matching ES documents.
    */
  def extractFromEs(): DataFrame = {
    val hits = sc.esJsonRDD(index,
      """
        |{
        |  "query": {
        |    "term": {
        |      "firstCategoryId": {
        |        "value": "10099"
        |      }
        |    }
        |  }
        |}
        |
      """.stripMargin).values

    // Re-serialize through fastjson to normalize each document string.
    val normalized = hits.map { doc =>
      JSON.parseObject(doc).toString
    }
    spark.read.json(normalized)
  }

  /**
    * Step 1 (manual workflow): dump the distinct 10099 categories to a local
    * CSV so they can be matched by hand; the result is read back via
    * `pipei_Path` in [[cate_10099]].
    *
    * @param frame documents extracted from ES (see [[extractFromEs]]).
    */
  def TiQu10099(frame: DataFrame): Unit = {
    frame.createOrReplaceTempView("t1")
    spark.sql(
      """
        |select
        |rootCategoryId,
        |rootCategoryName,
        |categoryId,
        |categoryName
        |from
        |t1
        |where firstCategoryId=10099
      """.stripMargin)
      .dropDuplicates("categoryId").repartition(1).write.option("header","true").csv(sourcePath_10099)
  }

  /**
    * 2. Re-categorize the 10099 documents for Meituan group-buy / Dianping.
    *
    * Reads the manually matched CSV (`pipei_Path`), derives the four-level
    * category ids from `standId` ("100" + prefixes of increasing length;
    * missing levels are padded with "99"), persists the mapping to
    * `gai_Path`, then left-joins it onto the source documents. Rows with no
    * match fall back to the 10099/1009999/... defaults.
    *
    * @param frame full 10099 data extracted from ES.
    * @return the input documents with refreshed category-id columns.
    */
  def cate_10099(frame: DataFrame): DataFrame = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId")
      .drop("secondCategoryId")
      .drop("thirdCategoryId")
      .drop("fourthCategoryId")
      .createOrReplaceTempView("detail_source")

    // Load the manually matched category CSV.
    spark.read.option("header","true").csv(pipei_Path).createOrReplaceTempView("gai")

    // Level 1/2 ids: "100" + first 2 / first 4 chars of standId.
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |categoryId,
        |concat("100",substr(standId,0,2)) as firstCategoryId,
        |concat("100",substr(standId,0,4)) as secondCategoryId
        |from
        |gai
      """.stripMargin
    ).createOrReplaceTempView("gai1")

    // Level 3 id: first 6 chars when standId is long enough, otherwise pad
    // the level-2 id with "99".
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |categoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Level 4 id: always level-3 id padded with "99".
    val mapping = spark.sqlContext.sql(
      """
        |select
        |categoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |concat(thirdCategoryId,'99') fourthCategoryId
        |from
        |gai2
      """.stripMargin
    )
    // Persist the deduplicated mapping locally for inspection/reuse.
    mapping.dropDuplicates("categoryId").repartition(1).write.json(gai_Path)

    // Join the mapping back onto the full data; unmatched rows get the
    // 10099 fallback ids at every level.
    mapping.dropDuplicates("categoryId").createOrReplaceTempView("t2")

    spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |detail_source a
        |left join
        |t2 b
        |on
        |a.categoryId = b.categoryId
      """.stripMargin)
  }

  /**
    * 3. Write the re-categorized documents back to ES, upserting by
    * `good_id` into `${index}/type_1`.
    *
    * @param frame documents produced by [[cate_10099]].
    */
  def laidUpEs(frame: DataFrame): Unit = {
    frame.toJSON.rdd
      .map(JSON.parseObject)
      .saveToEs(s"${index}/type_1",
              Map("es.mapping.id" -> "good_id",
                  "es.nodes" -> "192.168.2.247",
                  "es.port" -> "9200",
                  "cluster.name" -> "Es-OTO-Data"))
  }
}
