package com.o2o.cleaning.month.platform.ebusiness_plat.tengxunketang

import com.alibaba.fastjson.JSON
import com.o2o.utils.Iargs
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.util.control.NonFatal

object tengxunketang {
  // Job configuration. Kept as public `var`s so external drivers/tooling can
  // override them before invoking main (original design preserved).
  var platform = "tengxunketang" // platform name
  var year = "2022" // year of the month being cleaned
  var month = "9" // current month
  var last_month = "8" // previous month
  var timeStamp = Iargs.TIMESAMP // fixed timestamp for this monthly run

  // Raw source data for the current month
  var sourcePath = s"s3a://o2o-sourcedata-${year}/obs-source-${year}/${month}/${platform}/"
  // Raw source data for the previous month
  //  var last_sourcePath = s"s3a://o2o-sourcedata-2021/obs-source-2021/${last_month}/${platform}/"
  var last_sourcePath = s"s3a://o2o-sourcedata-${year}/obs-source-${year}/${last_month}/${platform}/"

  // Output path for the cleaned goods data
  var resultPath = s"s3a://o2o-tempdata/zyf/${year}/${month}/${platform}/"
  // dws output path, e.g. obs://dws-data/split/split_data/2021/tengxunketang/
  var resultPath_dws = s"s3a://dws-data/split/split_data/${year}/${platform}/${month}/"

  /**
    * Entry point: cleans one month of Tencent Classroom raw data, prints the
    * aggregated sales figures, tags every row with the fixed platform / shop
    * metadata and writes the result to both output paths as ORC.
    */
  def main(args: Array[String]): Unit = {

    // Spark session (local mode, Kryo serialisation).
    val spark = SparkSession.builder()
      .master("local[*]")
      .config("spark.debug.maxToStringFields", "10000")
      .appName("MongoSparkConnectorIntro")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // OBS (S3-compatible) access configuration.
    // SECURITY NOTE(review): credentials are hard-coded in source control;
    // they should be read from the environment / a secrets store and rotated.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")

    // Clean/compute goods metrics and keep only rows with a positive sales
    // volume and a positive price.
    val frame: DataFrame = tengxunketangCaculate(spark, sourcePath)
      .where("sellCount>0").where("priceText>0 ").drop("add_to_field") //and cast(salesAmount as float) < 2000000
      .withColumnRenamed("teacherInfoModule1", "teacherInfoModule")
    // createOrReplaceTempView replaces the deprecated registerTempTable.
    frame.createOrReplaceTempView("t2")
    println("销售量和销售额：")
    spark.sql(
      """
        |select count(1) count, cast(sum(sellCount) as bigInt) sellsum,cast(sum(salesAmount) as decimal(20,2)) salesum from t2
      """.stripMargin
    ).show()
    //    val frame2: DataFrame = tengxunketangStudyNum(spark, sourcePath, last_sourcePath)

    // Tag every row with the fixed platform / category / shop metadata.
    // The redundant duplicate where("sellCount>0")/where("priceText>0")
    // filters were dropped here: `frame` is already filtered above.
    val s_002 = frame
      .withColumn("timeStamp", lit(s"${timeStamp}"))
      .withColumn("platformName", lit("腾讯课堂"))
      .withColumn("platformId", lit("154"))
      .withColumn("firstcategoryid", lit("10030"))
      .withColumn("secondcategoryid", lit("1003004"))
      .withColumn("thirdcategoryid", lit("100300499"))
      .withColumn("fourthcategoryid", lit("10030049999"))
      .withColumn("name", lit("深圳市腾讯计算机系统有限公司"))
      .withColumn("address", lit("深圳市南山区粤海街道麻岭社区科技中一路腾讯大厦35层"))
      .withColumn("province", lit("广东省"))
      .withColumn("city", lit("深圳市"))
      .withColumn("district", lit("南山区"))
      .withColumn("administrative_region", lit("华南地区"))
      .withColumn("regional_id", lit("440305"))
      .withColumn("shopid", lit("tengxunketang001"))
      .withColumn("shopname", lit("腾讯课堂自营"))
      .withColumn("shoptype", lit("B"))

    // Persist the goods data to both destinations as a single ORC file each.
    s_002.repartition(1).write.mode("overwrite").orc(resultPath)
    s_002.repartition(1).write.mode("overwrite").orc(resultPath_dws)

  }


  /**
    * Goods computation: reads the raw ORC data, replays the per-day snapshots
    * stored under `add_to_field` to derive the monthly sales volume, takes and
    * normalises the last day's price text, and returns one row per goods item
    * with `sellCount`, `priceText`, `salesAmount` and `teacherInfoModule1`
    * columns added (and the raw `add_to_field` / `teacherInfoModule` removed).
    *
    * @param spark      active SparkSession
    * @param sourcePath ORC directory holding the raw crawled data
    * @return cleaned DataFrame
    */
  def tengxunketangCaculate(spark: SparkSession, sourcePath: String): DataFrame = {
    val cleaned = spark.read.orc(sourcePath).toJSON.rdd.map { line =>
      val nObject = JSON.parseObject(line)

      // Carry teacher info over under a temporary name; the caller renames it
      // back to "teacherInfoModule" after dropping the original key.
      // Null-safe: the original getOrDefault(...).toString NPE'd (outside the
      // try/catch) whenever the key was present with a null value.
      val teacherInfoModule: String =
        Option(nObject.get("teacherInfoModule")).map(_.toString).orNull

      val array = nObject.getJSONArray("add_to_field")
      var sellCount = 0
      var lastQuantity = 0
      var priceText = "0"

      try {
        for (i <- 0 until array.size()) {
          val snapshot = array.getJSONObject(i)
          val totalSell = snapshot.get("totalSell").toString.toInt
          // Monthly sales = sum of day-over-day increases; decreases (data
          // corrections) are ignored rather than subtracted.
          if (i != 0 && totalSell > lastQuantity) {
            sellCount += totalSell - lastQuantity
          }
          lastQuantity = totalSell

          // Price comes from the last day's snapshot; keep the raw value for
          // auditing, then normalise it for the numeric computation below.
          if (i + 1 == array.size()) {
            priceText = snapshot.get("priceText").toString
            nObject.put("priceText_collect", priceText)
            priceText = normalisePrice(priceText)
          }
        }
      } catch {
        // Tolerate malformed records (log and keep the defaults), but let
        // fatal errors such as OutOfMemoryError propagate.
        case NonFatal(e) =>
          println(e)
          println(nObject)
      }

      // A "1,234-5,678" style price keeps its thousands separator after the
      // range split in normalisePrice; strip it before converting to Double.
      if (priceText.contains(",")) {
        priceText = priceText.replace(",", "")
      }

      // NOTE(review): a non-numeric normalised price still fails the task
      // here, exactly as in the original code — confirm that is intended.
      val salesAmount = "%.2f".format(priceText.toDouble * sellCount)

      nObject.put("sellCount", sellCount)
      nObject.put("priceText", priceText)
      nObject.put("salesAmount", salesAmount)
      nObject.put("teacherInfoModule1", teacherInfoModule)
      nObject.remove("add_to_field")
      nObject.remove("teacherInfoModule")
      nObject.toString
    }
    spark.read.json(cleaned)
  }

  /**
    * Normalise a raw price string (branch order preserved from the original):
    * "-1" means no price, a range keeps its lower bound, thousands separators
    * are stripped, and a stray "" pair truncates the value.
    */
  private def normalisePrice(raw: String): String =
    if (raw == "-1") "0"
    else if (raw.contains("-")) raw.split("-")(0)
    else if (raw.contains(",")) raw.replace(",", "")
    else if (raw.contains("\"\"")) raw.split("\"\"")(0)
    else raw

  //
  //  def tengxunketangStudyNum(spark: SparkSession, sourcePath: String, last_sourcePath: String): DataFrame = {
  //    val lastdata = spark.read.orc(last_sourcePath).toJSON.rdd.map(line =>{
  //      val nObject = JSON.parseObject(line)
  //      nObject.remove("_id")
  //      nObject.toString
  //    })
  //    spark.read.json(lastdata).registerTempTable("lastmonth")
  //    val thisdata = spark.read.orc(sourcePath).toJSON.rdd.map(line =>{
  //      val nObject = JSON.parseObject(line)
  //      nObject.remove("_id")
  //      nObject.toString
  //    })
  //    spark.read.json(thisdata).registerTempTable("thismonth")
  //
  //    // compute, de-duplicated by servicesId
  //    var result = spark.sql(
  //      """
  //        |select sum(servicesStuCount) from(
  //        |select servicesId,(b.servicesStuCount-a.servicesStuCount) as servicesStuCount from
  //        |(select
  //        | servicesId
  //        |,max(servicesStuCount) servicesStuCount
  //        |from
  //        |lastmonth
  //        |group by servicesId
  //        |)a left join(
  //        |select
  //        | servicesId
  //        |,max(servicesStuCount) servicesStuCount
  //        |from
  //        |thismonth
  //        |group by servicesId
  //        |)b on a.servicesId = b.servicesId
  //        |)
  //        |""".stripMargin)
  //return null
  //  }
}
