package com.leal.client

import com.alibaba.fastjson.JSON
import com.leal.util.{JdbcUtil, SparkLoggerTrait, SparkUtil}
import org.apache.log4j.Logger
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.functions.{lit, log}
import org.apache.spark.sql.types.{ArrayType, DecimalType, FloatType, IntegerType, MapType, StringType}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * @Classname HelloWord
 * @Description Spark 3 demo job: reads person records from Hive and upserts a
 *              sample into MySQL; also shows Row->Map/JSON conversion and
 *              dynamic-partition-overwrite inserts.
 * @Date 2022/12/22 18:26
 * @Created by leal123
 */
object HelloWord extends SparkLoggerTrait {

  val logger: Logger = Logger.getLogger(this.getClass)

  /**
   * Entry point: loads person records from the Hive table
   * `cx_ads_safe.mysql_person` and upserts a ~10% sample into MySQL.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.initSpark(enableHive = true)
    // BUGFIX: no trailing ';' inside the statement — spark.sql parses a single
    // statement and a trailing semicolon can raise a ParseException.
    val frame: DataFrame = spark.sql("select  name,  phone,  address from cx_ads_safe.mysql_person")
    frame.show()
    frame.printSchema()

    dataToMysql(frame)

    spark.close()
  }

  /**
   * Samples ~10% of `frame`, repartitions it into 10 partitions and upserts each
   * partition into the MySQL table `es_student` via [[JdbcUtil.upsertData]].
   *
   * @param frame source data containing the columns name / phone / address
   */
  private def dataToMysql(frame: DataFrame): Unit = {
    // NOTE: sample(fraction) is approximate, hence the explicit count() below.
    val sampleFrame: Dataset[Row] = frame.sample(fraction = 0.1)
    val count: Long = sampleFrame.count()
    // BUGFIX: `count / 10` was integer (Long) division, so math.ceil was a no-op
    // and batchSize was 0 whenever count < 10. Divide as Double and keep >= 1.
    val batchSize: Int = math.max(1, math.ceil(count / 10.0).toInt)
    logger.info(s"数据记录count: $count, batchSize: $batchSize")
    // BUGFIX: repartition once and reuse — the original called repartition(10)
    // twice, shuffling the data twice; the partition sizes it logged did not
    // correspond to the partitioning that was actually written to MySQL.
    val repartitioned: Dataset[Row] = sampleFrame.repartition(10)
    // Diagnostic: print the record count of each partition on the driver.
    repartitioned.mapPartitions((rows: Iterator[Row]) => Iterator(rows.size))(Encoders.scalaInt).collect().foreach(println)
    repartitioned.foreachPartition((rows: Iterator[Row]) => {
      JdbcUtil.upsertData(rows, "LOCAL_MYSQL", "es_student", Seq("name", "phone", "address"), batchSize)
    })
  }

  /**
   * Checks whether `obj` survives Java serialization — handy when debugging
   * Spark "task not serializable" failures.
   *
   * @return true if writeObject succeeds, false on any non-fatal failure
   */
  def isSerializable(obj: Any): Boolean = {
    val oos = new java.io.ObjectOutputStream(new java.io.ByteArrayOutputStream())
    try {
      oos.writeObject(obj)
      true
    } catch {
      // NonFatal instead of Exception so fatal errors (OOM, etc.) still propagate.
      case scala.util.control.NonFatal(_) => false
    } finally {
      // BUGFIX: the stream was previously never closed.
      oos.close()
    }
  }

  /**
   * Converts every Row of `frame` into a mutable Map of column name -> rendered
   * string value. Null columns map to the empty string.
   */
  private def dataToMap(frame: DataFrame): RDD[mutable.Map[String, String]] = {
    frame.rdd.map((row: Row) => {
      val res: mutable.Map[String, String] = mutable.Map[String, String]()
      for (col <- row.schema.fieldNames) {
        val idx: Int = row.schema.fieldIndex(col)
        var value: String = ""
        if (!row.isNullAt(idx)) {
          value = row.schema(col).dataType match {
            case IntegerType => row.getAs[Int](col).toString
            case FloatType => row.getAs[Float](col).toString
            case DecimalType() => row.getAs[java.math.BigDecimal](col).toString
            // BUGFIX: match any array<string>, not only containsNull = true —
            // non-nullable string arrays previously fell into the default case
            // and blew up with a ClassCastException.
            case ArrayType(StringType, _) =>
              // Render as "[元素a,元素b]": each element is prefixed with 元素.
              val valueArray: mutable.WrappedArray[String] = row.getAs[mutable.WrappedArray[String]](col)
              s"[${valueArray.map((v: String) => s"元素${v}").mkString(",")}]"
            case MapType(_, _, _) => row.getAs[mutable.Map[String, String]](col).mkString(",")
            // BUGFIX: `row.getAs(col)` inferred T = String and threw
            // ClassCastException for e.g. Long columns; String.valueOf is safe
            // for any remaining type.
            case _ => String.valueOf(row.get(idx))
          }
        }
        res.put(col, value)
      }
      res
    })
  }

  /**
   * Converts `frame` to JSON strings keyed by the "id" field ("-" when the
   * field is absent) and prints the resulting RDD.
   */
  def dataToJson(frame: DataFrame, spark: SparkSession): Unit = {
    // Serialize every Row to a JSON document.
    val result: Dataset[String] = frame.toJSON
    result.show(truncate = false)

    import spark.implicits._
    val value: RDD[(String, String)] = result.map((json: String) => {
      // fastjson's JSONObject implements java.util.Map, so getOrDefault works.
      val rowkey: String = JSON.parseObject(json).getOrDefault("id", "-").toString
      (rowkey, json)
    }).rdd

    println("rdd 数据情况如下")
    // NOTE: foreach runs on the executors — in cluster mode this output lands
    // in the executor logs, not on the driver console.
    value.foreach(println)
  }

  /**
   * Writes a small demo DataFrame into the `dt = partition` partition of
   * `tableName`, overwriting only that partition.
   *
   * NOTE: insertInto matches columns by POSITION, so the table's partition
   * column `dt` must be its last column.
   */
  def insertPartitionTable(spark: SparkSession, tableName: String, partition: String): Unit = {
    // Demo frame with schema (id, name).
    val frame: DataFrame = spark.createDataFrame(Seq((1, "name111222"), (2, "name44111222")))
      .toDF("id", "name")

    // saveAsTable(Overwrite) + partitionBy would drop ALL existing partitions;
    // dynamic partitionOverwriteMode + insertInto replaces only the target one.
    spark.conf.set("spark.sql.sources.partitionOverwriteMode", "dynamic")
    frame.withColumn("dt", lit(partition))
      .write
      .mode(SaveMode.Overwrite)
      .insertInto(tableName)
  }
}