package com.o2o.kouqiangfnelei

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark._

import scala.util.control.NonFatal

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2020/8/4 11:27
  * @ Description: Exports oral-care product records from per-platform
  *                Elasticsearch indices to JSON files on Huawei OBS,
  *                split by category / year / month / platform.
  */
/**
  * Pulls product records for two oral-care categories out of per-platform
  * Elasticsearch indices and writes each (category, year, month, platform)
  * slice as a single JSON file on Huawei OBS:
  *
  *  - "shuiyaxian" (water flosser): taobao, tmall, jd, suning, guomei,
  *    kaola, secoo, dangdang
  *  - "koupeng" (mouth spray): taobao, tmall, jd, suning, txd, yunji, jumei
  *
  * Each platform has its own ES query (different category-id fields/values),
  * hence one public method per platform; they all funnel through the shared
  * [[extractAndWrite]] helper.
  *
  * NOTE(review): ES and OBS endpoints/credentials are hard-coded below;
  * they should be moved to configuration or a secrets store.
  */
object fenleichaifenEsToObs extends Serializable {

  // Categories to export and the year/month ranges available per year.
  val categorynames = Array("shuiyaxian", "koupeng")
  val years = Array(2019, 2020, 2021)
  val months1 = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) // full years
  val months2 = Array(1, 2, 3, 4, 5, 6) // 2021 only has data through June

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName("SparkTest")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3-compatible) credentials/endpoint for the output writes.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("WARN")

    for (categoryname <- categorynames; year <- years) {
      // 2021 is a partial year: only January..June exist in ES.
      val months = if (year == 2021) months2 else months1
      for (month <- months) {
        if (categoryname == "shuiyaxian") {
          // platforms: taobao, tmall, jd, suning, guomei, kaola, secoo, dangdang
          es4Obs2taobao(spark, sc, categoryname, year, month, "taobao")
          es4Obs2taobao(spark, sc, categoryname, year, month, "tmall")
          es4Obs2jd(spark, sc, categoryname, year, month, "jd")
          es4Obs2suning(spark, sc, categoryname, year, month, "suning")
          es4Obs2guomei(spark, sc, categoryname, year, month, "guomei")
          es4Obs2kaola(spark, sc, categoryname, year, month, "kaola")
          es4Obs2secoo(spark, sc, categoryname, year, month, "secoo")
          es4Obs2dangdang(spark, sc, categoryname, year, month, "dangdang")
        } else {
          // platforms: taobao, tmall, jd, suning, txd, yunji, jumei
          es4Obs22taobao(spark, sc, categoryname, year, month, "taobao")
          es4Obs22taobao(spark, sc, categoryname, year, month, "tmall")
          es4Obs22jd(spark, sc, categoryname, year, month, "jd")
          es4Obs22suning(spark, sc, categoryname, year, month, "suning")
          es4Obs22txd(spark, sc, categoryname, year, month, "txd")
          es4Obs22yunji(spark, sc, categoryname, year, month, "yunji")
          es4Obs22jumei(spark, sc, categoryname, year, month, "jumei")
        }
      }
    }
    sc.stop()
  }

  /** ES "index/type" pair for one platform/year/month slice. */
  private def esIndex(platformName: String, year: Int, month: Int): String =
    s"${year}_${platformName}/${platformName}_${year}_${month}"

  /** OBS output directory for one category/year/month/platform slice. */
  private def outputPath(categoryname: String, year: Int, month: Int, platformName: String): String =
    s"s3a://o2o-dataproces-group/zyf/fenleichaifen/${categoryname}/${year}/${month}/${platformName}"

  /**
    * Runs `query` against the slice's ES index and, if any documents match,
    * writes them to OBS as a single JSON file. `whereClause`, when given, is
    * a Spark SQL predicate applied before the write.
    */
  private def extractAndWrite(spark: SparkSession, sc: SparkContext, categoryname: String,
                              year: Int, month: Int, platformName: String,
                              query: String, whereClause: Option[String] = None): Unit = {
    val data = sc.esJsonRDD(esIndex(platformName, year, month), query).values
    println(s"-----提取 ：${year}/${month}/${platformName} 的数据 -----")
    // isEmpty() triggers a small job; skip the write entirely for empty slices.
    if (!data.isEmpty()) {
      val df = spark.read.json(data)
      val selected = whereClause.fold(df)(df.where)
      selected.repartition(1).write.json(outputPath(categoryname, year, month, platformName))
    }
  }

  /** shuiyaxian on taobao/tmall: match any of three water-flosser category ids. */
  def es4Obs2taobao(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "categoryId": "50008376"
        |          }
        |        },{
        |          "match_phrase": {
        |            "categoryId": "201154510"
        |          }
        |        },{
        |          "match_phrase": {
        |            "categoryId": "201266285"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** shuiyaxian on jd: single sub-category id. */
  def es4Obs2jd(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 10,
        |  "query": {
        |    "bool": {
        |      "must": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "17405"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** shuiyaxian on suning: single sub-category id. */
  def es4Obs2suning(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 10,
        |  "query": {
        |    "bool": {
        |      "must": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "505717"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** shuiyaxian on guomei: single sub-category id. */
  def es4Obs2guomei(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 10,
        |  "query": {
        |    "bool": {
        |      "must": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "15985630"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** shuiyaxian on kaola: matches on detailCategoryId rather than subCategoryId. */
  def es4Obs2kaola(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 10,
        |  "query": {
        |    "bool": {
        |      "must": [
        |        {
        |          "match_phrase": {
        |            "detailCategoryId": "504"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** shuiyaxian on secoo: single sub-category id. */
  def es4Obs2secoo(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 10,
        |  "query": {
        |    "bool": {
        |      "must": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "4095"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /**
    * shuiyaxian on dangdang: no dedicated category id, so filter by the oral-care
    * sub-category name AND a title match on any of the three product keywords.
    */
  def es4Obs2dangdang(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 100,
        |  "query": {
        |    "bool": {
        |      "must": [
        |        {"match_phrase": {
        |          "subCategoryName": "口腔护理"
        |        }},{"bool": {
        |          "should": [
        |            {"match_phrase": {
        |              "title": "冲牙器"
        |            }},
        |             {"match_phrase": {
        |              "title": "洗牙器"
        |            }},
        |             {"match_phrase": {
        |              "title": "水牙线"
        |            }}
        |          ]
        |        }}
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** koupeng on taobao/tmall: match either of two mouth-spray category ids. */
  def es4Obs22taobao(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "categoryId": "121466033"
        |          }
        |        },{
        |          "match_phrase": {
        |            "categoryId": "201176570"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /** koupeng on jd: single sub-category id. */
  def es4Obs22jd(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "16813"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin)
  }

  /**
    * koupeng on suning: the ES sub-category is broader than mouth spray, so each
    * record's nested `Base_Info` JSON blob is parsed and its "类别" (category)
    * value promoted to a top-level "leibie" field, which is then filtered with
    * a SQL predicate before writing.
    */
  def es4Obs22suning(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    val data = sc.esJsonRDD(esIndex(platformName, year, month),
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "316566"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin).values

    val data1 = spark.read.json(data).toJSON.rdd.map(lines => {
      val nObject = JSON.parseObject(lines)

      // fastjson's getString returns null when the key is absent — guard
      // before calling isEmpty (the original code would NPE here).
      val base_info: String = nObject.getString("Base_Info")
      if (base_info != null && !base_info.isEmpty) {
        try {
          val midObj: JSONObject = JSON.parseObject(base_info)
          nObject.put("leibie", midObj.getString("类别"))
        } catch {
          // Malformed Base_Info: log the offending record id and keep going.
          // NonFatal (not Throwable) so OOM/interrupts still propagate.
          case NonFatal(e) => println("good_id" + nObject.getString("good_id"))
        }
      }

      nObject.toString
    })

    println(s"-----提取 ：${year}/${month}/${platformName} 的数据 -----")
    if (!data.isEmpty()) {
      spark.read.json(data1).where("leibie = '口喷'").drop("leibie").repartition(1).write.json(outputPath(categoryname, year, month, platformName))
    }
  }

  /** koupeng on txd: taobao category id, then a title-keyword filter. */
  def es4Obs22txd(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "tbCategoryId": "201354306"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin,
      Some("title like '%口喷%' or title like '%口腔喷%' or title like '%口气清新剂%'"))
  }

  /** koupeng on yunji: sub-category id, then a title-keyword filter. */
  def es4Obs22yunji(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "210"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin,
      Some("title like '%口喷%' or title like '%口腔喷%' or title like '%口气清新剂%'"))
  }

  /** koupeng on jumei: sub-category id, then a title-keyword filter. */
  def es4Obs22jumei(spark: SparkSession, sc: SparkContext, categoryname: String, year: Int, month: Int, platformName: String): Unit = {
    extractAndWrite(spark, sc, categoryname, year, month, platformName,
      """
        |{
        |  "size": 1000,
        |  "query": {
        |    "bool": {
        |      "should": [
        |        {
        |          "match_phrase": {
        |            "subCategoryId": "402"
        |          }
        |        }
        |      ]
        |    }
        |  }
        |}
      """.stripMargin,
      Some("title like '%口喷%' or title like '%口腔喷%' or title like '%口气清新剂%'"))
  }

}
