package com.o2o.cleaning.month.platform.ebusiness_plat.wangyiyanxuan

import com.alibaba.fastjson.{JSON, JSONObject}
import com.o2o.utils.Iargs
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ArrayBuffer

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2019/4/18
  * @ Param:  $param$
  * @ Description: Sales statistics for NetEase Yanxuan (网易严选).
  *
  *   1. Sales: cumulative daily sales volume
  *   2. Categories: swbfirstId, swbfirstName, firstCategoryId, secondCategoryId, thirdCategoryId, fourthCategoryId
  *   3. Address: NetEase's registered company address
  *
  */
object WangyiYanxuanApp_618 {

  /**
    * Entry point: reads the month's raw crawl data from OBS, computes per-item
    * sales, joins category and address dimensions, writes the result as ORC and
    * prints a summary (row count, total sellCount, total salesAmount).
    */
  def main(args: Array[String]): Unit = {
    // NOTE(review): master("local[*]") is hard-coded — remove before submitting to a cluster.
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3A-compatible) credentials come from the shared Iargs config object.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    /** *******************  IMPORTANT: per-run parameters — edit before each run   start   ************************************/

    val year = Iargs.YEAR
    val month = Iargs.MONTH
    // Reference timestamp for this batch (epoch seconds, 2021-06-18 00:00 CST).
    // NOTE(review): hard-coded instead of Iargs.TIMESAMP — confirm before reuse.
    val timeStamp = "1623945600"

    val platform = "wangyiyanxuan"
    // Far-future upper bound (2100-12-31 23:59:59) — effectively "no end time".
    val endTime: Long = 4133951999L

    // Timestamp stamped onto every output row by parseAddress.
    val needTime = timeStamp.toLong
    // Category dimension table (CSV with header).
    val catePath = s"s3a://o2o-dimension-table/category_table/wangyiyanxuan_cate/wangyiyanxuan_cate_20210${month}20"
    // Raw crawl data for the month (JSON lines).
    val sourcePath = s"s3a://o2o-sourcedata-2021/obs-source-2021/620/${platform}/"
    // Output path for the cleaned goods data (ORC).
    val resultPath = s"s3a://o2o-dataproces-group/zyf/${year}/${month}20/${platform}/"

    /** *******************  IMPORTANT: per-run parameters   end   ************************************/

    /** Load raw data. */
    val sourceRdd: RDD[String] = sc.textFile(sourcePath)

    // MOFCOM category mapping ("swb_schame"."swb_cate_allstand"): one row per categoryId.
    val cateDF: DataFrame = spark.read.option("header", true).option("delimiter", ",").csv(catePath)
      .selectExpr("categoryId", "swbfirstId", "swbfirstName", "firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId")
      .dropDuplicates("categoryId")

    /** 1. Sales calculation. */
    val dataDF_temp: DataFrame = caculate(spark, sourceRdd, timeStamp.toLong, startTime = 0, endTime = endTime).drop("add_to_field")
    // Keep only rows with a positive sales count and a positive price.
    val dataDF = dataDF_temp.filter("sellCount>'0' and priceText>'0'").drop("sellCount_m")

    /** 2. Join categories: swbfirstId, swbfirstName, firstCategoryId, secondCategoryId, thirdCategoryId, fourthCategoryId. */
    val dataCate: DataFrame = WYYXRelation.parseCate(spark, dataDF, cateDF)
    // Diagnostic output: rows that failed to match a category.
    dataCate.filter("swbfirstId is null").show()

    val show = dataCate.selectExpr("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId").dropDuplicates()
    show.filter("firstCategoryId is null").show()
    println("firstCategoryId is null  : ")
    dataCate.filter("firstCategoryId is null").show()

    /** 3. Join address and add the platform-required fields. */
    val dataAddrDF: DataFrame = WYYXRelation.parseAddress(spark, dataCate, needTime)
    /** Save the result. */
    dataAddrDF.repartition(1).write.orc(resultPath)

    // registerTempTable is deprecated since Spark 2.0; createOrReplaceTempView is the
    // drop-in replacement with identical semantics.
    dataAddrDF.createOrReplaceTempView("tab")

    // Summary: row count, total sales volume, total sales amount.
    spark.sql(
      """
        |
        |select
        |count(1),
        |sum(sellCount),
        |sum(salesAmount)
        |from
        |tab
      """.stripMargin).show(false)

    sc.stop()

  }

  /**
    * Bulk-loads the cleaned data into Elasticsearch.
    *
    * Every DataFrame row is serialized to JSON, parsed into a fastjson object and
    * indexed under "<year>_<platform>/<platform>_<year>_<month>", with the row's
    * "good_id" used as the ES document id.
    *
    * @param spark         active session (kept for signature compatibility)
    * @param dataFrame     cleaned data to index
    * @param platform_Name platform identifier used in the index name
    * @param year          statistics year
    * @param month         statistics month
    */
  def SAVE_2_ES(spark: SparkSession, dataFrame: DataFrame, platform_Name: String, year: String, month: String) = {
    import org.elasticsearch.spark._

    // One fastjson object per row; its "good_id" field becomes the document id.
    val jsonRdd = dataFrame.toJSON.rdd.map(row => JSON.parseObject(row))

    println("**********         开始入库" + "\n" + "**********         node_157" + "\n" +
      s"          ${year}_${platform_Name}/${platform_Name}_${year}_${month}")

    val index = s"${year}_${platform_Name}/${platform_Name}_${year}_${month}"

    // NOTE(review): ES endpoint and credentials are hard-coded — move to configuration.
    val esConf = Map(
      "es.mapping.id" -> "good_id",
      "es.nodes" -> "192.168.1.29",
      "es.net.http.auth.user" -> "elastic",
      "es.net.http.auth.pass" -> "changeme",
      "es.port" -> "9200",
      "cluster.name" -> "O2OElastic")

    jsonRdd.saveToEs(index, esConf)

    println("\n*****************  已入完  *****************")

  }


  /**
    * Computes per-item sales figures from the raw crawl JSON lines.
    *
    * Each input line is a JSON record whose optional "add_to_field" array holds
    * crawl snapshots ({priceText, sellCount, crawl_date}). Within the
    * [startTime, endTime] window this overwrites on the record:
    *   - priceText:   last positive price observed (2-decimal rounded)
    *   - sellCount:   newest snapshot count minus oldest, in crawl_date order
    *   - sellCount_m: last appended count minus first real value, in insertion order
    *   - salesAmount: priceText * sellCount (2-decimal rounded)
    * Records without "add_to_field" pass through unchanged.
    *
    * @param spark     session used to infer the DataFrame schema from the JSON strings
    * @param rDD       raw JSON lines, one record per line
    * @param timeStamp NOTE(review): unused inside this method — kept for signature compatibility
    * @param startTime inclusive lower bound (epoch seconds) on snapshot crawl_date
    * @param endTime   inclusive upper bound (epoch seconds); default is 2100-12-31
    * @return DataFrame built from the (possibly updated) JSON records
    */
  def caculate(spark: SparkSession, rDD: RDD[String], timeStamp: Long, startTime: Long = 0, endTime: Long = 4133951999L): DataFrame = {

    val rdd = rDD.map(line => {
      val nObject: JSONObject = JSON.parseObject(line)
      val add_to_field = nObject.getJSONArray("add_to_field")

      if (null != add_to_field) {
        var priceText_Loc = 0D // last positive price in the window
        var sellCount_Loc = 0L // sales over the window (newest minus oldest snapshot)

        import scala.collection.mutable.Map
        // crawl_date -> sellCount; duplicate crawl_dates overwrite each other.
        val sell_map: Map[Long, Long] = Map()

        // Sentinel 0 at index 0, so "length > 1" below means "at least one value collected".
        val price_temp = ArrayBuffer[Double](0)
        val sell_temp = ArrayBuffer[Long](0)


        for (i <- 0 to add_to_field.size() - 1) {
          val add_to_field_i: JSONObject = JSON.parseObject(add_to_field.get(i).toString)

          // Missing fields default to -1 so they fail the "> 0" checks below.
          val price_add = add_to_field_i.getOrDefault("priceText", "-1").toString.toDouble
          val sell_add = add_to_field_i.getOrDefault("sellCount", "-1").toString.toLong
          val crawl_date = add_to_field_i.getOrDefault("crawl_date", "-1").toString.toLong

          if (startTime <= crawl_date && crawl_date <= endTime) {
            if (price_add > 0) price_temp += price_add
            // NOTE(review): when i == 0 AND sell_add > 0, both branches fire and the
            // first snapshot is appended twice. sellCount_m below still reads the same
            // values (sell_temp(1) and .last) — confirm the duplication is intentional.
            if (i == 0) sell_temp += sell_add
            if (sell_add > 0) sell_temp += sell_add
            sell_map.put(crawl_date, sell_add) // for re-sorting out-of-order snapshots
          }
        }

        // Snapshots in chronological order regardless of array order.
        val sell_tuple: Seq[(Long, Long)] = sell_map.toSeq.sortBy(_._1)
        if (price_temp.length > 1) priceText_Loc = price_temp.last.formatted("%.2f").toDouble
        // NOTE(review): guarded by sell_temp but computed from sell_map — when the only
        // in-window snapshot has sell_add <= 0 at i == 0, this still subtracts the
        // negative values; verify that is the intended behavior.
        if (sell_temp.length > 1) sellCount_Loc = sell_tuple.last._2 - sell_tuple.head._2
//        if (sell_temp.length > 1) sellCount_Loc = (sell_tuple.last._2 - sell_tuple.head._2)+(sell_tuple.last._2 - sell_tuple.head._2)/15*3
        // Alternative measure using insertion order: last appended minus first real value.
        val sellCount_m = if (sell_temp.length > 1) sell_temp.last - sell_temp(1) else 0

        //*************************** adjusted section ***************************//

        //        val sells: Int = (sellCount_Loc * 30 / 27).toInt

        //*************************** adjusted section ***************************//
//        if (sellCount_Loc > 10) sellCount_Loc = (sellCount_Loc / 30.0 * 18.0).toInt

        nObject.put("priceText", priceText_Loc)
        //        nObject.put("sellCount", sells)
        nObject.put("sellCount", sellCount_Loc)
        //        nObject.put("sellCount_m", sells)
        nObject.put("sellCount_m", sellCount_m)
        //        nObject.put("salesAmount", (priceText_Loc * sells).formatted("%.2f").toDouble)
        // Amount = last price * window sales, rounded to 2 decimals.
        nObject.put("salesAmount", (priceText_Loc * sellCount_Loc).formatted("%.2f").toDouble)

      }

      nObject.toString
    })

    spark.read.json(rdd)
  }

}
