package com.o2o.cleaning.month.platform.ebusiness_plat.nyb

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.lit
import org.elasticsearch.spark._
import util.GetTitle

object nyb_brand_category_handle {


  /**
    * Driver: pulls raw goods rows from Hologres down to OBS, then for every
    * (month, platform) pair joins goods titles against the category participle
    * table to derive an `nyb_brand` label, evenly spreads sell/sales figures
    * across the duplicate rows the join produces per good_id, and writes one
    * ORC result directory per platform and month.
    */
  def main(args: Array[String]) {

    val spark = SparkSession.builder()
      .appName("test_es")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      // SECURITY(review): ES credentials are hard-coded in source — move to config / a secrets store.
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      //      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // SECURITY(review): OBS/S3 access keys are hard-coded in source — move to config / a secrets store.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("WARN")

    // Run configuration. The commented alternatives are toggles for multi-month
    // runs; `timestamp` must correspond to the month(s) being processed.
    val year = 2022
    val monthArr: Array[String] = Array("04")
    //    val monthArr: Array[String] = Array("01", "02", "03")
    //    val monthArr: Array[String] = Array("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12")
    //    val timestamp: Array[Int] = Array(1548777600, 1551283200, 1553875200, 1556553600, 1559145600, 1561824000, 1564416000, 1567094400, 1569772800, 1572364800, 1575043200, 1577635200)
    //    val timestamp: Array[Int] = Array(1580313600, 1582819200, 1585497600, 1588176000, 1590768000, 1593446400, 1596038400, 1598716800, 1601395200, 1603987200, 1606665600, 1609257600)
    //    val timestamp: Array[Int] = Array(1611936000, 1614441600, 1617033600, 1619712000, 1622304000, 1624982400, 1627574400, 1630252800, 1632931200, 1635523200, 1638201600, 1640793600)
    //    val timestamp: Array[Int] = Array(1643472000, 1645977600, 1648569600)
    val timestamp = 1651248000

    // Numeric Hologres platform ids to pull; the id -> name mapping lives in holoSkyToLocat.
    //    val index_TO_holo: Array[String] = Array("9", "10", "5", "3", "2", "4", "29", "8", "11", "6", "19", "21", "26", "58", "67", "68", "69", "71", "76", "56")
    val index_TO_holo: Array[String] = Array("9", "10", "5", "3", "2", "4", "29", "8", "11", "6", "19", "21", "26", "58", "67", "68", "69", "71", "73", "74", "76", "56")

    // Stage 1: pull the month's source data down to OBS (Hologres skynet feed;
    // the ES / landnet variants are kept as toggles).
    //    esToLocat(spark, sc, monthArr, indexArr, year)  // back
    holoSkyToLocat(spark, sc, monthArr, index_TO_holo, year)
    //    holoLandToLocat(spark, sc, monthArr, Array("56"), year)

    // Stage 2: platform directory names written by stage 1, now to be processed.
    val indexArr: Array[String] = Array("taobao", "tmall", "jd", "suning", "txd", "guomei", "kaola", "yunji", "dangdang", "miya", "jumei", "secoo", "womaiwang", "rongyigou", "beibeiapp", "intime", "palmlife", "rsmsh", "sifulan", "douyin", "kuaishou", "dewu", "txd")
    //    val indexArr: Array[String] = Array("txd")
    for (month <- monthArr) {
      for (pingtai <- indexArr) {
        try {
          val value = s"s3a://o2o-dataproces-group/zyf/nyb/source/${year}/${month.toInt}/${pingtai}/"
          //          val value = "D:\\test.json"
          // Normalize each record: clean the title through GetTitle, stash the
          // cleaned title/evaluates under *_new so they can be renamed back below,
          // and drop fields that will be recomputed by the join.
          val data = spark.read.json(value).toJSON.rdd.map(lines => {
            val nObject: JSONObject = JSON.parseObject(lines)
            val title = nObject.getOrDefault("title", "-1").toString
            val title_new = GetTitle.getTitle(title)
            val evaluates = nObject.getOrDefault("evaluates", "-1").toString
            nObject.put("evaluates_new", evaluates)
            nObject.remove("evaluates")
            nObject.remove("title")
            nObject.remove("serviceCommitment")
            nObject.remove("nyb_brand")
            nObject.put("title_new", title_new)
            nObject.toString
          })

          spark.read.json(data)
            .withColumnRenamed("title_new", "title")
            .withColumnRenamed("evaluates_new", "evaluates")
            .createOrReplaceTempView("all")

          val category_path = "s3a://o2o-dataproces-group/zyf/nyb/category/*"
          spark.read.option("header", true).csv(category_path)
            .dropDuplicates("title")
            .createOrReplaceTempView("category")

          println("value ->" + value)
          println("category_path ->" + category_path)

          // Attach the brand label: a goods row matches a category row when its
          // title contains both participles and the standard category ids agree.
          spark.sql(
            """
              |select
              |t1.*
              |,t2.title as nyb_brand
              |from
              |all t1
              |left join
              |category t2
              |on t1.title like concat('%',t2.firstParticiple,'%') and t1.title like concat('%',t2.secondParticiple,'%')
              |and t1.standardlastcategoryid = t2.standardLastCategoryId
              |""".stripMargin)
            .createOrReplaceTempView("tmid")

          // The join can fan a good out across several brand matches; divide the
          // sales figures by the per-good row count so overall totals are preserved.
          val resultData = spark.sql(
            """
              |select a.*
              |,cast(round(sellcount / con,0) as bigint) as sellCount_new
              |,salesamount / con as salesAmount_new from(
              |   select *,count(good_id) over(partition by good_id) con from tmid
              |)a
              |""".stripMargin)
            .drop("sellcount")
            .drop("salesamount")
            .withColumnRenamed("sellCount_new", "sellCount")
            .withColumnRenamed("salesAmount_new", "salesAmount")
            .withColumnRenamed("standard_brand_id", "brandValueId")
            .withColumnRenamed("standard_brand_name", "brandName_cn")
            .withColumnRenamed("region_id", "regional_ID")
            .withColumnRenamed("platform_id", "platformId")
            .withColumnRenamed("firstcategoryid", "firstCategoryId")
            .withColumnRenamed("secondcategoryid", "secondCategoryId")
            .withColumnRenamed("thirdcategoryid", "thirdCategoryId")
            .withColumnRenamed("fourthcategoryid", "fourthCategoryId")
            .toJSON

          // NOTE(review): this fastjson parse/serialize round-trip looks redundant
          // (resultData is already one JSON document per line) — confirm before removing.
          val finalData: RDD[String] = spark.read.json(resultData).toJSON.rdd.map(lines => {
            val nObject: JSONObject = JSON.parseObject(lines)
            nObject.toString
          })

          // Directory name -> display name stored in the platformName column.
          // (A duplicate, unreachable `case "txd"` was removed; unknown platforms
          // now fail with a clear message instead of a bare MatchError. Either way
          // the exception is caught below and the platform is skipped.)
          val platformname = pingtai match {
            case "taobao" => "淘宝"
            case "tmall" => "天猫"
            case "jd" => "京东"
            case "suning" => "苏宁易购"
            case "guomei" => "国美"
            case "kaola" => "考拉"
            case "yunji" => "云集"
            case "dangdang" => "当当"
            case "miya" => "蜜芽"
            case "jumei" => "聚美"
            case "secoo" => "寺库"
            case "womaiwang" => "我买网"
            case "rongyigou" => "融e购"
            case "beibeiapp" => "贝贝APP"
            case "intime" => "喵街"
            case "palmlife" => "掌上生活"
            case "rsmsh" => "日上MSH"
            case "sifulan" => "丝芙兰"
            case "douyin" => "抖音"
            case "kuaishou" => "快手"
            case "txd" => "淘鲜达"
            case "dewu" => "得物"
            case other => throw new IllegalArgumentException(s"unmapped platform directory: ${other}")
          }
          spark.read.json(finalData).select("platformId", "nyb_brand", "sellCount", "salesAmount",
            "brandValueId", "brandName_cn", "province", "city", "district", "regional_ID", "firstCategoryId", "secondCategoryId",
            "thirdCategoryId", "fourthCategoryId")
            .withColumn("platformName", lit(s"${platformname}"))
            // Multi-month runs index into the timestamp array; single-month runs use the scalar.
            //            .withColumn("timeStamp", lit(s"${timestamp(month.toInt - 1)}"))
            .withColumn("timeStamp", lit(s"${timestamp}"))
            .createOrReplaceTempView("result")

          spark.sql(
            """
              |select platformId,platformName,nyb_brand,sellCount,salesAmount,brandValueId,brandName_cn,province,city,district,regional_ID,firstCategoryId,secondCategoryId,thirdCategoryId,fourthCategoryId,timeStamp
              |from result
              |""".stripMargin)
            .coalesce(1).write.mode("overwrite").orc(s"s3a://o2o-dataproces-group/zyf/nyb/result/${pingtai}/${year}/${month.toInt}/")
          //            .coalesce(1).write.mode("overwrite").json("D://test123")

        }
        catch {
          // Best-effort per platform: log the failure and continue with the next one.
          case e: Exception => {
            println(e)
          }
        }
      }
    }
  }

  /**
    * Pulls one month of "skynet" goods rows per platform from Hologres (JDBC)
    * and dumps them to OBS as JSON under source/{year}/{month}/{platformName}/.
    *
    * @param spark    active session used for the JDBC read and the JSON write
    * @param sc       unused here; kept for signature parity with the sibling loaders
    * @param monthArr zero-padded month strings, e.g. "04"
    * @param indexArr numeric platform ids (see the match below for the id -> name mapping)
    * @param year     four-digit year used in the source table name and output path
    */
  def holoSkyToLocat(spark: SparkSession, sc: SparkContext, monthArr: Array[String], indexArr: Array[String], year: Int) = {
    for (month <- monthArr) {
      for (pingtai <- indexArr) {
        try {
          println(s"拉取平台==> : " + pingtai.toString)
          // Month-partitioned source table, restricted to the relevant third-level
          // category ids and to rows without price anomalies.
          // SECURITY(review): JDBC credentials are hard-coded — move to config / a secrets store.
          val shoptab = spark.read.format("jdbc")
            .option("url", "jdbc:postgresql://hgprecn-cn-m7r1xxicx002-cn-beijing.hologres.aliyuncs.com:80/pdb")
            .option("user", "LTAI4Fzm6tCPCiAutd9tXicj")
            .option("driver", "org.postgresql.Driver")
            .option("password", "IPiUVIlP1arYKBX3hETT0MdSkE4qeQ")
            .option("dbtable",
              s"""
                 |(select * from dwd.dwd_skynet_good_mf_${year}${month} where platform_id = '${pingtai}' and price_abnormal_degree < 9
                 |and thirdcategoryid in ('100210203','100210201','100210204','100210206','100210205','100210207','100210302','100210301','100210303','100210305',
                 |'100210202','100210501','100250399','100210106','100210901','100210105','100210102','100210210','100210807','100210802','100240201','100210304',
                 |'100250302','100210208','100210804','100240106','100210803','100210209','100210809','100210806','100210299','100240107','100250311','100250301',
                 |'100250304','100240101','100250314','100210801','100240102','100240202','100210905','100240105','100250305','100250303','100210805','100250316',
                 |'100250306','100210808','100240104','100240103','100210899','100250309','100250310','100210399','100250317','100250308','100250313','100250312','100210810','100250315'
                 |) ) t1
                 |""".stripMargin)
            .option("partitionColumn", "random_int_val")
            .option("numPartitions", "8")
            .option("lowerBound", "0")
            .option("upperBound", "1200000")
            .option("fetchsize", "10000")
            .load() //.show()
          if (!shoptab.isEmpty) {
            // Hologres numeric platform id -> directory name used on OBS.
            // An unmapped id now fails with a clear message (caught below) instead
            // of a bare MatchError.
            val pingtai1 = pingtai match {
              case "9" => "taobao"
              case "10" => "tmall"
              case "5" => "jd"
              case "3" => "suning"
              case "2" => "guomei"
              case "4" => "kaola"
              case "29" => "yunji"
              case "8" => "dangdang"
              case "11" => "miya"
              case "6" => "jumei"
              case "19" => "secoo"
              case "21" => "womaiwang"
              case "26" => "rongyigou"
              case "58" => "beibeiapp"
              case "67" => "intime"
              case "68" => "palmlife"
              case "69" => "rsmsh"
              case "71" => "sifulan"
              case "73" => "douyin"
              case "74" => "kuaishou"
              case "76" => "dewu"
              case "56" => "txd"
              case other => throw new IllegalArgumentException(s"unmapped platform id: ${other}")
            }
            // Fix: log the same path that is written below (`month.toInt`, e.g. "4",
            // not the zero-padded "04" the old log showed).
            println(s"s3a://o2o-dataproces-group/zyf/nyb/source/${year}/${month.toInt}/${pingtai1}/")
            shoptab.coalesce(8).write.mode("overwrite").json(s"s3a://o2o-dataproces-group/zyf/nyb/source/${year}/${month.toInt}/${pingtai1}/")
          }
        }
        catch {
          // Best-effort per platform: log the failure and continue with the next one.
          case e: Exception => {
            println(e)
          }
        }
      }
    }
  }

  /**
    * Downloads one month of "landnet" goods rows from Hologres (JDBC) and stores
    * them on OBS as JSON under source/{year}/{month}/txd/ — the landnet feed is
    * always written to the "txd" directory regardless of the platform id queried.
    *
    * @param spark    active session used for the JDBC read and the JSON write
    * @param sc       unused here; retained for signature parity with the sibling loaders
    * @param monthArr zero-padded month strings, e.g. "04"
    * @param indexArr platform ids used as the platform_id filter in the query
    * @param year     four-digit year used in the source table name and output path
    */
  def holoLandToLocat(spark: SparkSession, sc: SparkContext, monthArr: Array[String], indexArr: Array[String], year: Int) = {
    monthArr.foreach { month =>
      indexArr.foreach { platformId =>
        try {
          println(s"拉取平台==> : " + platformId)
          // Month-partitioned source table, limited to the relevant third-level
          // category ids and to rows without price anomalies.
          val query =
            s"""
               |(select * from dwd.dwd_landnet_good_mf_${year}${month} where platform_id = '${platformId}' and price_abnormal_degree < 9
               |and thirdcategoryid in ('100210203','100210201','100210204','100210206','100210205','100210207','100210302','100210301','100210303','100210305',
               |'100210202','100210501','100250399','100210106','100210901','100210105','100210102','100210210','100210807','100210802','100240201','100210304',
               |'100250302','100210208','100210804','100240106','100210803','100210209','100210809','100210806','100210299','100240107','100250311','100250301',
               |'100250304','100240101','100250314','100210801','100240102','100240202','100210905','100240105','100250305','100250303','100210805','100250316',
               |'100250306','100210808','100240104','100240103','100210899','100250309','100250310','100210399','100250317','100250308','100250313','100250312','100210810','100250315'
               |) ) t1
               |""".stripMargin
          // Parallel JDBC read split on random_int_val into 8 partitions.
          val rows = spark.read.format("jdbc")
            .option("url", "jdbc:postgresql://hgprecn-cn-m7r1xxicx002-cn-beijing.hologres.aliyuncs.com:80/pdb")
            .option("driver", "org.postgresql.Driver")
            .option("user", "LTAI4Fzm6tCPCiAutd9tXicj")
            .option("password", "IPiUVIlP1arYKBX3hETT0MdSkE4qeQ")
            .option("dbtable", query)
            .option("partitionColumn", "random_int_val")
            .option("lowerBound", "0")
            .option("upperBound", "1200000")
            .option("numPartitions", "8")
            .option("fetchsize", "10000")
            .load()
          // Only write a month that actually returned rows.
          if (!rows.isEmpty) {
            val target = s"s3a://o2o-dataproces-group/zyf/nyb/source/${year}/${month.toInt}/txd/"
            rows.coalesce(8).write.mode("overwrite").json(target)
          }
        } catch {
          // Best-effort per platform: log the failure and move on.
          case e: Exception => println(e)
        }
      }
    }
  }

  /**
    * Pulls one month of goods documents per platform from Elasticsearch and
    * stores them on OBS as JSON under source/{year}/{month}/{platform}/.
    *
    * @param spark    active session used to infer a schema and write the JSON
    * @param sc       context carrying the es-hadoop connection settings set in main
    * @param monthArr months to pull — note this overload takes Array[Int], unlike
    *                 the Hologres loaders which take zero-padded strings
    * @param indexArr platform names forming the ES index/type pattern
    * @param year     four-digit year used in the index name and output path
    */
  def esToLocat(spark: SparkSession, sc: SparkContext, monthArr: Array[Int], indexArr: Array[String], year: Int) = {
    for (month <- monthArr) {
      for (pingtai <- indexArr) {

        try {
          // Index/type pattern: {year}_{platform}/{platform}_{year}_{month}
          val index = s"${year}_${pingtai}/${pingtai}_${year}_${month}/"
          println("index -> " + index)
          // Filter to the relevant third-level category ids.
          // NOTE(review): "size": 0 in a plain ES search requests zero hits;
          // es-hadoop drives its own scroll, so confirm this field is ignored
          // here rather than silently suppressing all results.
          val value = sc.esJsonRDD(index,
            """
              |{
              |  "size": 0,
              |  "query": {
              |    "bool": {
              |      "must": [
              |        {"bool": {
              |          "should": [
              |           {"terms": {
              |          "thirdCategoryId": [
              |            "100210203",
              |            "100210201",
              |            "100210204",
              |            "100210206",
              |            "100210205",
              |            "100210207",
              |            "100210302",
              |            "100210301",
              |            "100210305"
              |          ]
              |        }}
              |          ]
              |        }}
              |      ]
              |    }
              |  }
              |}
              |""".stripMargin).values

          // NOTE(review): the read happens before the isEmpty check below — on an
          // empty RDD spark.read.json may throw, which is swallowed by the catch.
          val frame = spark.read.json(value)

          if (!value.isEmpty) {
            frame.write.mode("overwrite").json(s"s3a://o2o-dataproces-group/zyf/nyb/source/${year}/${month}/${pingtai}/")
          }
        }
        catch {
          // Best-effort: log the failure and continue with the next platform/month.
          case e: Exception => {
            println(e)
          }
        }
      }
    }
  }
}