package com.o2o.cleaning.month.platform.ebusiness_plat.zaixianjy_yl.yiliao

import com.alibaba.fastjson.JSON
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable
import scala.util.control.NonFatal

object haodaifu {

  // Job parameters — mutable so a driver script can override them before running.
  var platform = "haodaifu" // platform name
  var year = "2021"         // year of the month being processed
  var month = "7"           // month being processed

  // Path of the cleaned goods data for this platform/month,
  // e.g. dws-data/split/split_data/2021/haodaifu/7/
  var resultPath = s"s3a://dws-data/split/split_data/${year}/${platform}/${month}/"

  /**
   * Entry point: builds a local SparkSession, configures S3A access to
   * Huawei Cloud OBS, reads the registration (guahao) ORC snapshot and
   * prints it for ad-hoc inspection.
   */
  def main(args: Array[String]): Unit = {
    // Spark session (local mode; Kryo serialization for speed).
    val spark = SparkSession.builder()
      .master("local[*]")
      .config("spark.debug.maxToStringFields", "10000")
      .appName("MongoSparkConnectorIntro")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // OBS (S3-compatible) access configuration.
    // SECURITY: credentials are hard-coded here. Move them to environment
    // variables or a Hadoop credentials provider before sharing this file.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")

//    val fram = zaixianyiliaoCaculate(spark, resultPath).createOrReplaceTempView("t1")
    // createOrReplaceTempView replaces the long-deprecated registerTempTable.
    spark.read
      .orc(s"s3a://o2o-dataproces-group/zyf/2022/1/haodaifu_guahao/1/")
      .createOrReplaceTempView("t1")
    spark.sql(
      """
        |select * from t1
        |""".stripMargin)
      .show(false)
  }

  /**
   * Reads cleaned ORC rows, counts the distinct non-empty `teacherId` values
   * inside each row's `teacherInfoModule` array, writes the count back into
   * the row as `teacherCount`, and returns the enriched rows as a DataFrame.
   *
   * Bug fix: the original ignored `sourcePath` and always read the
   * object-level `resultPath`; the parameter is now honoured (the previous
   * caller passed `resultPath`, so behaviour for it is unchanged).
   *
   * @param spark      active SparkSession
   * @param sourcePath ORC input path to read from
   * @return input rows with an added integer `teacherCount` field
   */
  def zaixianyiliaoCaculate(spark: SparkSession, sourcePath: String): DataFrame = {
    val enriched = spark.read.orc(sourcePath).toJSON.rdd.map { line =>
      val record = JSON.parseObject(line)
      var teacherCount = 0
      try {
        if (record.containsKey("teacherInfoModule")) {
          // Collect distinct, non-empty teacher ids from the module array.
          val teachers = new mutable.HashSet[String]()
          for (elem <- record.getJSONArray("teacherInfoModule").toArray()) {
            val teacherObject = JSON.parseObject(elem.toString)
            if (teacherObject.containsKey("teacherId")) {
              // Option(...) guards against a present-but-null teacherId,
              // which would previously have thrown an NPE into the catch.
              val id = Option(teacherObject.get("teacherId")).map(_.toString).getOrElse("")
              if (id.nonEmpty) teachers.add(id)
            }
          }
          teacherCount = teachers.size
        }
      } catch {
        // NonFatal (not Exception) so OOM/interrupts still propagate; log
        // the offending record id and keep the job alive (count stays 0).
        case NonFatal(e) =>
          println(e)
          println(record.getString("good_id") + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
      }
      record.put("teacherCount", teacherCount)
      record.toString
    }
    // NOTE(review): json(RDD[String]) is deprecated since Spark 2.2; kept to
    // avoid introducing Encoders/Dataset imports — consider spark.createDataset.
    spark.read.json(enriched)
  }

}
