package com.o2o.cleaning.month.platform.ebusiness_plat.wangyiyanxuan.backup.yanxaun_shuangping_201905

import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.mongodb.spark.rdd.MongoRDD
import com.o2o.utils.{Iargs, time_stamp_util}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.bson.Document

import scala.collection.mutable.{ArrayBuffer, Set}

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2019/4/18
  * @ Param:  $param$
  * @ Description: Merges the NetEase Yanxuan April/May crawl snapshots and
  *                computes per-item promo ("shuangpin") sales metrics for the 2019-05-10 run.
  */
object YanXuan_shuangping_20190510 {

  /**
    * Entry point. Runs one of two mutually exclusive phases selected by the
    * hard-coded `need_flag`:
    *  - 0: merge the April and May crawl snapshots into one collection
    *  - 1: recompute metrics over the merged collection, print aggregate
    *       discount statistics, and write the categorised result to OBS
    * NOTE(review): all paths, timestamps and `need_flag` are hard-coded and
    * are edited manually before each run.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3-compatible) credentials for the s3a:// paths used below.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    /** *******************  IMPORTANT — edit these values before each run *************************************/

    val year = Iargs.YEAR
    val month = Iargs.MONTH
    val timeStamp = Iargs.TIMESAMP
    val start_timeStamp = 1556380801 // 2019-04-28 00:00:01 CST (inclusive window start)
    val end_timeStamp = 1557505800 // 2019-05-11 00:30:00 CST — NOTE(review): original note said 00:00:00

    val platform_Name = "wangyiyanxuan"
    val obs = "s3a://"
    val hdfs_188 = "hdfs://192.168.2.188:9000/"
    val cate_path = "s3a://o2o-dimension-table/category_table/wangyiyanxuan_cate/wangyiyanxuan_cate_20190428"
    val source_path = "s3a://o2o-sourcedata/obs-source-2019/shuangping"
    val part1_path = source_path + "/0510/wangyiyanxuan/wangyiyanxuan_1904/"
    val part2_path = source_path + "/0510/wangyiyanxuan/wangyiyanxuan_20190510"

    // Merged collection: output of phase 0, input of phase 1.
    val part_con_path = source_path + "/0510/wangyiyanxuan/wangyiyanxuan_20190428_0510"

    val need_flag = 1 // 0 = merge phase, 1 = calculation phase

    /**
      * Phase 0: merge the two monthly snapshots.
      */
    if (need_flag == 0) {
      // 1. Take the ids of both collections (part1: all May ids; part2: ids that appear only in April).
      // 2. part1 joins the May fields, part2 joins the April fields.
      // 3. April's add_to_field is exposed as add_to_field_4; May's stays add_to_field.
      // 4. part1: 0504 - (0428/0501); part2: last snapshot (i.e. 0430) - 0428.

      val data4 = spark.read.json(part1_path) //.dropDuplicates()
      val data5 = spark.read.json(part2_path)

      val allId_5: DataFrame = data5.selectExpr("good_id", "add_to_field")
      val allId_4: DataFrame = data4.selectExpr("good_id", "add_to_field")
      val all_id: Dataset[Row] = allId_5.selectExpr("good_id").union(allId_4.selectExpr("good_id")).dropDuplicates()
      val id_only4 = all_id.except(allId_5.selectExpr("good_id")).join(allId_4, Seq("good_id"), "left").dropDuplicates()
      // NOTE(review): id_only5 is computed but never used below.
      val id_only5 = all_id.except(allId_4.selectExpr("good_id")).join(allId_5, Seq("good_id"), "left").dropDuplicates()

      val allData_5 = data5.join(allId_4.selectExpr("good_id", "add_to_field as add_to_field_4", "'4con5' as con_flag"), Seq("good_id"), "left") //.dropDuplicates()
      //      allData_5.repartition(1).write.mode("overwrite").json(part_con_path)
      val allData_only4: Dataset[Row] = id_only4.selectExpr("good_id", "add_to_field as add_to_field_4", "'4' as con_flag").join(data4, Seq("good_id"), "left").dropDuplicates()
      //      allData_only4.repartition(1).write.mode("append").json(part_con_path)
      // NOTE(review): with both writes above commented out this branch currently has no effect.
    }


    /**
      * Phase 1: recompute metrics over the merged collection and publish.
      */
    if (need_flag == 1) {
      // Merge conventions (must match phase 0):
      // part1 carries full May ids, part2 the April-only ids; April's history is
      // add_to_field_4, May's is add_to_field. part1: 0504 - (0428/0501); part2: 0430 - 0428.

      val dataFrame: DataFrame = spark.read.json(part_con_path)

      // Keep only goods flagged as "shuangpin" (promotion) items.
      val dataDF = parseCaculate(spark, dataFrame, start_timeStamp, end_timeStamp).filter("is_shuangPin='true'")

      println("sellCount<='0'" + dataDF.filter("sellCount<='0'").count())

      // NOTE(review): comparisons against the string '0' rely on Spark's implicit casts — confirm.
      val value: Dataset[Row] = dataDF.filter("sellCount>'0' and priceText>'0'")

      println(value.count())
      value.createOrReplaceTempView("data_df_v")

      // Print overall totals (a) joined with the discount rate over discounted goods (b).
      spark.sql(

        """
          |select rate,rateSell,totalSellCount,totalSalesAmount from
          |
          |	(
          |	select 100 as joinkey
          |	,sum(sellCount) as totalSellCount
          |	,sum(sellCount*priceText) as totalSalesAmount
          |	from data_df_v
          |	) a
          |
          | left join
          |	(
          |	select sum(original_cost*sellCount-priceText*sellCount)/sum(original_cost*sellCount) as rate,100 as joinkey
          | ,sum(original_cost*sellCount-priceText*sellCount) rateSell
          |	from data_df_v
          |	where original_cost > priceText
          |	) b
          |	on a.joinkey=b.joinkey
        """.stripMargin).show()


      // Attach the category dimension, keep one row per good, and write the result.
      val data_cate: DataFrame = value.join(spark.read.json(cate_path).selectExpr("categoryId", "swbfirstId", "swbfirstName").dropDuplicates(),
        Seq("categoryId"), "left").dropDuplicates("good_id")

      // NOTE(review): no save mode is set — this write fails if the output path already exists.
      data_cate.repartition(1).write
        .json("s3a://o2o-sourcedata/obs-source-2019/shuangping/0510/wangyiyanxuan/result/wangyiyanxuan_shuangping_0510")
      // save2es(spark,data_cate.drop("add_to_field","add_to_field_4"),platform_Name,year,month)

    }

    sc.stop()
  }


  /**
    * Recomputes per-good metrics (latest price, units sold inside the crawl
    * window, promo flag) from the `add_to_field` / `add_to_field_4` crawl
    * histories, then enriches every record with fixed platform / brand /
    * company-region attributes for NetEase Yanxuan.
    *
    * @param spark           active session (used to re-infer the output schema)
    * @param dataFrame       merged snapshot data, one row per good
    * @param start_timeStamp inclusive window start, epoch seconds
    * @param end_timeStamp   exclusive window end, epoch seconds
    * @return DataFrame with recomputed metric columns and static attributes
    */
  def parseCaculate(spark: SparkSession, dataFrame: DataFrame, start_timeStamp: Long, end_timeStamp: Long): DataFrame = {

    val dataRDD = dataFrame.toJSON.rdd.map(line => {
      val nObject: JSONObject = JSON.parseObject(line)
      // con_flag: "5" = only May history, "4" = only April history,
      // "4con5" = good appears in both months (April history in add_to_field_4).
      val con_flag = nObject.getOrDefault("con_flag", "5")
      val add_to_field = nObject.getJSONArray("add_to_field")
      val add_to_field_4 = nObject.getJSONArray("add_to_field_4")

      var priceText_Loc = 0D
      var original_cost = 0D
      var sellCount_Loc = 0L
      var is_shuangPin = "false"

      // Index 0 of each buffer is a sentinel; real samples start at index 1,
      // so "length > 1" below means "at least one sample fell in the window".
      val price_temp = ArrayBuffer[Double](0)
      val sell_temp = ArrayBuffer[Long](0)
      // val (not var): the mutable Set is updated in place and never reassigned.
      val is_shuangPin_iSet = Set("false")

      // Folds one crawl-history array into the accumulators above. Replaces
      // three verbatim copies of the same loop in the original code; semantics
      // are preserved exactly, including seeding the sell baseline with the
      // first in-window sample even when it is <= 0 (a positive first sample
      // is appended twice: once as baseline, once as data).
      def accumulate(history: com.alibaba.fastjson.JSONArray): Unit = {
        if (null != history) {
          for (i <- 0 until history.size()) {
            val entry: JSONObject = JSON.parseObject(history.get(i).toString)

            val is_shuangPin_i = entry.getOrDefault("is_shuangPin", "false").toString
            val price_add = entry.getOrDefault("priceText", "-1").toString.toDouble
            val sell_add = entry.getOrDefault("sellCount", "-1").toString.toLong
            val crawl_date = entry.getOrDefault("crawl_date", "-1").toString.toLong

            is_shuangPin_iSet += is_shuangPin_i
            if (crawl_date >= start_timeStamp && crawl_date < end_timeStamp) {
              if (price_add > 0) price_temp += price_add
              if (i == 0) sell_temp += sell_add
              if (sell_add > 0) sell_temp += sell_add
            }
          }
        }
      }

      if (con_flag == "4" || con_flag == "5") {
        // For "4" the April and May arrays are identical; for "5" only
        // add_to_field exists — either way one pass over add_to_field suffices.
        accumulate(add_to_field)
      }

      if (con_flag == "4con5") {
        // April history first, then May, so `.last` below is the newest sample.
        accumulate(add_to_field_4)
        accumulate(add_to_field)
      }

      // Bug fix: the original dereferenced add_to_field unconditionally here,
      // throwing NPE for a null history (e.g. a record carrying only
      // add_to_field_4) and IndexOutOfBounds for an empty one.
      if (null != add_to_field && add_to_field.size() > 0) {
        original_cost = JSON.parseObject(add_to_field.get(add_to_field.size() - 1).toString).getOrDefault("original_cost", "-1").toString.toDouble.formatted("%.2f").toDouble
      }
      if (price_temp.length > 1) priceText_Loc = price_temp.last.formatted("%.2f").toDouble
      // Units sold = newest cumulative count minus the window baseline.
      if (sell_temp.length > 1) sellCount_Loc = sell_temp.last - sell_temp(1)

      // The good is "shuangpin" (promo) if any crawl sample flagged it true.
      if (is_shuangPin_iSet.exists(x => x.contains("true"))) is_shuangPin = "true"
      nObject.put("is_shuangPin", is_shuangPin)
      nObject.put("priceText", priceText_Loc)
      nObject.put("original_cost", original_cost)
      nObject.put("sellCount", sellCount_Loc)
      nObject.put("salesAmount", (priceText_Loc * sellCount_Loc).formatted("%.2f").toDouble)

      // Static platform / brand attributes.
      nObject.put("platformName", "网易严选")
      nObject.put("platformName_spelling", "wangyiyanxuan")
      nObject.put("brandValueId", "wy100001")
      nObject.put("brandName", "网易严选")
      nObject.put("brandName_cn", "网易严选")
      nObject.put("brandName_en", "网易严选")
      nObject.put("timeStamp", "1557417600")
      nObject.put("brand_type", "国产品牌")
      //      nObject.put("is_oversea", "false")
      nObject.put("good_id", nObject.get("good_id").toString)
      nObject.put("Base_Info", nObject.getOrDefault("Base_Info", "{}").toString)
      nObject.put("goodRatePercentage", nObject.getOrDefault("goodRatePercentage", "-1").toString.replace("%", ""))

      // Static company-registration / region attributes (Hangzhou, Binjiang).
      nObject.put("administrative_region", "华东地区")
      nObject.put("city", "杭州市")
      nObject.put("city_grade", "2")
      nObject.put("city_origin", "杭州市")
      nObject.put("district", "滨江区")
      nObject.put("district_origin", "滨江区")
      nObject.put("economic_division", "2")
      nObject.put("if_city", "1")
      nObject.put("if_district", "2")
      nObject.put("if_state_level_new_areas", "0")
      nObject.put("poor_counties", "0")
      nObject.put("province", "浙江省")
      nObject.put("regional_ID", "330108")
      nObject.put("rural_demonstration_counties", "0")
      nObject.put("rural_ecommerce", "0")
      nObject.put("the_belt_and_road_city", "0")
      nObject.put("the_belt_and_road_province", "2")
      nObject.put("the_yangtze_river_economic_zone_city", "1")
      nObject.put("the_yangtze_river_economic_zone_province", "1")
      nObject.put("urban_agglomerations", "1")
      nObject.put("registration_institution", "杭州市高新区（滨江）市场监督管理局")
      nObject.put("address", "浙江省杭州市滨江区长河街道网商路599号4幢410室")
      nObject.put("name", "杭州网易严选贸易有限公司")

      nObject.toString
    })
    spark.read.json(dataRDD)

  }


  /**
    * Loads a MongoDB collection as a DataFrame, dropping the Mongo-generated
    * `_id` field and stamping every document with the supplied `timeStamp`.
    *
    * @param spark          active session
    * @param readUri        MongoDB connection uri
    * @param readDatabase   database name
    * @param readCollection collection name
    * @param timeStamp      value written into each document's "timeStamp" field
    * @return the collection as a JSON-schema-inferred DataFrame
    */
  def getMongoDBData(spark: SparkSession, readUri: String, readDatabase: String, readCollection: String, timeStamp: String): DataFrame = {
    val readConfig = ReadConfig(Map("uri" -> readUri, "database" -> readDatabase, "collection" -> readCollection))

    // Fix: removed the unused `day` value the original derived from the wall
    // clock here — it was dead code.

    val mongoRDD: MongoRDD[Document] = MongoSpark.load(spark.sparkContext, readConfig)
    val values = mongoRDD.map(line => {
      // _id is a Mongo-internal ObjectId; drop it before schema inference.
      val nObject: JSONObject = JSON.parseObject(line.toJson())
      nObject.remove("_id")
      nObject.put("timeStamp", timeStamp)
      nObject.toString
    })

    spark.read.json(values)

  }


  /**
    * Normalises three string-typed fields of each record in preparation for an
    * Elasticsearch export. The export itself is currently disabled (commented
    * out below), so this method builds a lazy RDD and performs no action.
    */
  def SAVE_2_ES(spark: SparkSession, dataFrame: DataFrame, platform_Name: String, year: Int, month: Int) = {

    val normalized = dataFrame.toJSON.rdd.map { row =>
      val doc = JSON.parseObject(row)

      // Force good_id and Base_Info to plain strings; strip the '%' sign
      // from goodRatePercentage.
      val goodId = doc.get("good_id").toString
      val baseInfo = doc.getOrDefault("Base_Info", "{}").toString
      val goodRate = doc.getOrDefault("goodRatePercentage", "-1").toString.replace("%", "")

      doc.put("good_id", goodId)
      doc.put("Base_Info", baseInfo)
      doc.put("goodRatePercentage", goodRate)
      doc
    }
//    import org.elasticsearch.spark._
//    normalized.saveToEs(s"wangyiyanxuan_shuangping_0510/type_1",
//      Map("es.mapping.id" -> "good_id",
//        "es.nodes" -> "192.168.2.247",
//        "es.net.http.auth.user" -> "elastic",
//        "es.net.http.auth.pass" -> "changeme",
//        "es.port" -> "9200",
//        "cluster.name" -> "O2OElastic"))
  }

}
