package com.o2o.cleaning.month.platform.ebusiness_plat.wangyiyanxuan.backup.yanxuan_20190618

import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.mongodb.spark.rdd.MongoRDD
import com.o2o.utils.Iargs
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.bson.Document

import scala.collection.mutable.ArrayBuffer

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2019/4/18
  * @ Param:  $param$
  * @ Description:  网易严选的销量统计
  *
  *   1.销量：日累计销量
  *   2.分类：swbfirstId,swbfirstName,firstCategoryId,secondCategoryId,thirdCategoryId,fourthCategoryId
  *   3.地址：网易公司的具体地址
  *
  */
object Monitor_WangyiYanxuan_20190610 {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
//      .config("spark.sql.caseSensitive", "true")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // S3A (OBS) credentials come from the shared Iargs configuration object.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    /** *******************  IMPORTANT: adjust the values below per run *************************************/


    val platform = "wangyiyanxuan"
    // val startTime = 1559232000  //2019-05-31 0:0:00
    // NOTE(review): the date comments below may not match the epoch values —
    // 1559836800 is 2019-06-07 00:00:00 CST, not 2019-06-09 23:59:59. Confirm the
    // intended window before the next run.
    val endTime = 1559836800 //2019-06-09 23:59:59
    val needTime = 1560009600 //2019-06-09 0:0:00

    val year = Iargs.YEAR
    val month = Iargs.MONTH
    val timeStamp = Iargs.TIMESAMP
    val hdfs_188 = "hdfs://192.168.2.188:9000/"

    val readCollection = "yanxuan_detail"
    val readDatabase = "wangYiYanXuan"
    // Fixed: the URI previously contained a stray space after '@', which makes the
    // MongoDB connection string invalid.
    // FIXME(security): credentials are hard-coded in source; move them to external config.
    val readUri = "mongodb://root:O2Odata123!@192.168.0.149:27017/admin"


    // All raw source data for the month
    val sourcePath = s"s3a://o2o-sourcedata/obs-source-${year}/${month}/${platform}/${platform}_${year}_${month}"
    // Category dimension table
    val catePath = s"s3a://o2o-dimension-table/category_table/wangyiyanxuan_cate/wangyiyanxuan_cate_20190${month}"
    // Output paths for the cleaned goods data
    val resultPath = s"s3a://o2o-dataproces-group/wei_jilong/dws-data/esdata/${year}/${platform}/201906_01-09"
    val resultPath1 = s"s3a://dws-data/g_data/618/20190609/wangyiyanxuan/wangyiyanxuan_201906_01-09"
    val source_save_Path = s"s3a://o2o-sourcedata/obs-source-2019/618/1-9/wangyiyanxuan/wangyiyanxuan_201906_01-09"

    /** Read the input data (from the previously-saved S3 snapshot; the direct
      * MongoDB read below is kept for reference). */
        val sourceRdd: RDD[String] = sc.textFile(source_save_Path)
//    val sourceRdd: RDD[String] = getMongoDBData(spark, readUri = readUri, readDatabase = readDatabase, readCollection = readCollection, timeStamp)
//    sourceRdd.repartition(1).saveAsTextFile(source_save_Path)

    /** Step 1: compute sales figures within [0, endTime]. */
    val dataDF: DataFrame = caculate(spark, sourceRdd, timeStamp.toLong, startTime = 0, endTime = endTime).drop("add_to_field")

    // test:
        println("sellCount<='0'" + dataDF.filter("sellCount<='0'").count())
//        dataDF.selectExpr("good_id", "sellCount", "sellCount_m","salesAmount", "title", "sku_title").filter("sellCount<='0'").orderBy("sellCount").show(100)
//        import spark.implicits._
//        dataDF.selectExpr("good_id", "sellCount", "sellCount_m","salesAmount", "title", "sku_title").sort($"sellCount".desc).show(100)
//        import org.apache.spark.sql.functions._
//        dataDF.filter("sellCount>0").selectExpr("good_id", "sellCount", "sellCount_m","salesAmount", "title", "sku_title").groupBy().agg(count("good_id"), sum("sellCount").as("sellCount"), sum("salesAmount").as("salesAmount")).show()
//        dataDF.filter("sellCount>0").selectExpr("good_id", "sellCount", "sellCount_m","salesAmount", "title", "sku_title").groupBy().agg(count("good_id"), sum("sellCount_m").as("sellCount_m"), sum("salesAmount").as("salesAmount")).show()


    /** Step 2: join category dimensions: categoryId,categoryName,rootCategoryId,rootCategoryName,swbfirstId,swbfirstName,firstCategoryId,secondCategoryId,thirdCategoryId,fourthCategoryId */
    val dataCate: DataFrame = dataDF.filter("sellCount>'0' and priceText>'0'").join(spark.read.format("csv").option("header", true).option("delimiter", ",").load(catePath)
      .selectExpr("categoryId", "swbfirstId", "swbfirstName", "firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId")
      .dropDuplicates("categoryId"), Seq("categoryId"), "left").dropDuplicates("good_id")
      // Show rows that failed to match a category, for manual inspection.
      dataCate.filter("swbfirstId is null").show()

    /** Step 3: attach the (fixed) company address fields. */
    val dataAddrDF: DataFrame = parseAddress(spark, dataCate, needTime).drop("sellCount_m")
//

    /** Persist the results. */
    dataAddrDF.repartition(1).write.orc(resultPath)
    dataAddrDF.drop("base_info").repartition(1).write.orc(resultPath1)

    /** *     Write to Elasticsearch (currently disabled)   ****/
//    SAVE_2_ES(spark, dataAddrDF, platform, year, month)


    sc.stop()

  }


  /**
    * Enriches each raw crawl record with computed sales figures.
    *
    * Reads the "add_to_field" array of crawl snapshots, keeps those whose
    * crawl_date falls inside [startTime, endTime], and derives:
    *   - priceText:   the last positive price sample, rounded to 2 decimals
    *   - sellCount:   last minus first cumulative sellCount, ordered by crawl_date
    *   - sellCount_m: last minus first sellCount in raw (insertion) order
    *   - salesAmount: priceText * sellCount, rounded to 2 decimals
    *
    * @param spark     active session, used to parse the enriched JSON back into a DataFrame
    * @param rDD       raw records, one JSON document per line
    * @param timeStamp run timestamp (currently unused inside this method)
    * @param startTime inclusive lower bound on crawl_date (epoch seconds)
    * @param endTime   inclusive upper bound on crawl_date (epoch seconds)
    * @return DataFrame with the four computed columns added
    */
  def caculate(spark: SparkSession, rDD: RDD[String], timeStamp: Long, startTime: Long = 0, endTime: Long = 4133951999L): DataFrame = {

    val enriched = rDD.map { line =>
      val record: JSONObject = JSON.parseObject(line)
      val history = record.getJSONArray("add_to_field")

      var latestPrice = 0D
      var periodSales = 0L

      // crawl_date -> sellCount; used to restore chronological order when
      // snapshots arrived out of order.
      val sellByDate = scala.collection.mutable.Map.empty[Long, Long]

      // Both buffers are seeded with a sentinel 0, so "length > 1" below means
      // "at least one real sample was collected".
      val priceSamples = ArrayBuffer[Double](0)
      val sellSamples = ArrayBuffer[Long](0)

      if (null != history) {
        for (i <- 0 until history.size()) {
          val snapshot: JSONObject = JSON.parseObject(history.get(i).toString)

          val price = snapshot.getOrDefault("priceText", "-1").toString.toDouble
          val sold = snapshot.getOrDefault("sellCount", "-1").toString.toLong
          val crawlDate = snapshot.getOrDefault("crawl_date", "-1").toString.toLong

          if (startTime <= crawlDate && crawlDate <= endTime) {
            if (price > 0) priceSamples += price
            // NOTE(review): when i == 0 and sold > 0 BOTH branches fire, so the
            // first sample is appended twice. Preserved from the original —
            // confirm whether this is intentional before changing it.
            if (i == 0) sellSamples += sold
            if (sold > 0) sellSamples += sold
            sellByDate.put(crawlDate, sold)
          }
        }
      }

      // Chronologically ordered (date, sellCount) pairs.
      val ordered: Seq[(Long, Long)] = sellByDate.toSeq.sortBy(_._1)
      if (priceSamples.length > 1) latestPrice = priceSamples.last.formatted("%.2f").toDouble
      if (sellSamples.length > 1) periodSales = ordered.last._2 - ordered.head._2
      // Delta over the raw sample order (skipping the sentinel at index 0).
      val monthDelta = if (sellSamples.length > 1) sellSamples.last - sellSamples(1) else 0

      record.put("priceText", latestPrice)
      record.put("sellCount", periodSales)
      record.put("sellCount_m", monthDelta)
      record.put("salesAmount", (latestPrice * periodSales).formatted("%.2f").toDouble)

      record.toString
    }

    spark.read.json(enriched)
  }


  /**
    * Attaches fixed platform/brand metadata and the NetEase YanXuan company
    * address fields to every row, then re-parses the result as a DataFrame.
    *
    * All address values are constants (NetEase's registered address in
    * Binjiang, Hangzhou) because this platform is a first-party seller.
    *
    * @param spark     active session, used to parse the enriched JSON
    * @param dataFrame cleaned goods data (must contain a non-null "good_id")
    * @param timeStamp run timestamp written into the "timeStamp" field
    * @return DataFrame with the constant metadata/address columns added
    */
  def parseAddress(spark: SparkSession, dataFrame: DataFrame, timeStamp: Long): DataFrame = {

    val rdd = dataFrame.toJSON.rdd.map(line => {
      val nObject: JSONObject = JSON.parseObject(line)

      // Normalize good_id to a string.
      // NOTE(review): this throws NPE if good_id is missing — presumably upstream
      // guarantees it is always present; verify against the cleaning step.
      nObject.put("good_id", nObject.get("good_id").toString)
      nObject.put("platformName", "网易严选")
      nObject.put("platformId", "55")
      nObject.put("platformName_spelling", "wangyiyanxuan")
      nObject.put("brandValueId", "wy100001")
      nObject.put("brandName", "网易严选")
      nObject.put("brandName_cn", "网易严选")
      nObject.put("brandName_en", "网易严选")
      nObject.put("timeStamp", s"${timeStamp}")
      nObject.put("brand_type", "国产品牌")
      //      nObject.put("is_oversea", "false")
      // (removed a duplicate good_id put that repeated the line above verbatim)
      nObject.put("Base_Info", nObject.getOrDefault("Base_Info", "{}").toString)
      // Strip the trailing '%' so the rate parses as a number downstream.
      nObject.put("goodRatePercentage", nObject.getOrDefault("goodRatePercentage", "-1").toString.replace("%", ""))

      // Fixed company-address attributes (NetEase YanXuan, Binjiang, Hangzhou).
      nObject.put("administrative_region", "华东地区")
      nObject.put("city", "杭州市")
      nObject.put("city_grade", "2")
      nObject.put("city_origin", "杭州市")
      nObject.put("district", "滨江区")
      nObject.put("district_origin", "滨江区")
      nObject.put("economic_division", "2")
      nObject.put("if_city", "1")
      nObject.put("if_district", "2")
      nObject.put("if_state_level_new_areas", "0")
      nObject.put("poor_counties", "0")
      nObject.put("province", "浙江省")
      nObject.put("regional_ID", "330108")
      nObject.put("rural_demonstration_counties", "0")
      nObject.put("rural_ecommerce", "0")
      nObject.put("the_belt_and_road_city", "0")
      nObject.put("the_belt_and_road_province", "2")
      nObject.put("the_yangtze_river_economic_zone_city", "1")
      nObject.put("the_yangtze_river_economic_zone_province", "1")
      nObject.put("urban_agglomerations", "1")
      nObject.put("registration_institution", "杭州市高新区（滨江）市场监督管理局")
      nObject.put("address", "浙江省杭州市滨江区长河街道网商路599号4幢410室")
      nObject.put("name", "杭州网易严选贸易有限公司")

      nObject.toString
    })

    spark.read.json(rdd)
  }

  /**
    * Pushes the cleaned data into Elasticsearch (index {year}_{platform}).
    * The actual write is currently disabled; only the lazy RDD conversion
    * remains, so calling this method performs no work.
    */
  def SAVE_2_ES(spark: SparkSession, dataFrame: DataFrame, platform_Name: String, year: String, month: String) = {

    //    val data_cate = spark.read.json("s3a://o2o-dataproces-group/wei_jilong/private/temp/2019/wangyiyanxuan/0428_03")
    // Serialize each row to JSON and parse it into a fastjson object so the ES
    // connector can pick documents apart by "good_id". This RDD is lazy and is
    // only materialized by the saveToEs call below (currently commented out).
    val ress = dataFrame.toJSON.rdd.map(row => JSON.parseObject(row))
//
//    println("**********         start indexing" + "\n" + "**********         node_157" + "\n" +
//      s"          ${year}_${platform_Name}/${platform_Name}_${year}_${month}")
//    import org.elasticsearch.spark._
//    ress.saveToEs(s"${year}_${platform_Name}/${platform_Name}_${year}_${month}",
//      Map("es.mapping.id" -> "good_id",
//        "es.nodes" -> "192.168.1.157",
//        "es.net.http.auth.user" -> "elastic",
//        "es.net.http.auth.pass" -> "changeme",
//        "es.port" -> "9200",
//        "cluster.name" -> "O2OElastic"))
//
//    println("\n*****************  indexing finished  *****************")

  }

  /**
    * Loads a MongoDB collection and returns it as an RDD of JSON strings.
    *
    * The Mongo "_id" field is stripped from every document because its
    * ObjectId representation is not wanted downstream.
    *
    * @param spark          active session whose SparkContext performs the load
    * @param readUri        MongoDB connection URI
    * @param readDatabase   database name
    * @param readCollection collection name
    * @param timeStamp      run timestamp (currently not written into the records)
    * @return one JSON string per document, without "_id"
    */
  def getMongoDBData(spark: SparkSession, readUri: String, readDatabase: String, readCollection: String, timeStamp: String): RDD[String] = {
    val cfg = ReadConfig(Map("uri" -> readUri, "database" -> readDatabase, "collection" -> readCollection))

    val docs: MongoRDD[Document] = MongoSpark.load(spark.sparkContext, cfg)

    docs.map { doc =>
      val json: JSONObject = JSON.parseObject(doc.toJson())
      json.remove("_id")
      //      json.put("timeStamp", timeStamp)  // kept disabled, as in the original
      json.toString
    }
  }

}
