package com.leal.client

import com.leal.util.{JdbcUtil, SparkLoggerTrait, SparkUtil}
import org.apache.commons.io.FileUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.dsl.expressions.intToLiteral
import org.apache.spark.sql.types.{ArrayType, DataType, StringType, StructField, StructType}


import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * @Classname CrowdDeal
 * @Description Spark 3 job: reads crowd-label tables from Hive, flattens the
 *              per-label flag columns, and exports results (console / ES / MySQL).
 * @Date 2022/12/22 18:26
 * @Created by leal123
 */
object CrowdDeal extends SparkLoggerTrait {

  /**
   * Entry point: reads the crowd-label result table from Hive and renders every
   * row as a `Map[columnName -> stringValue]`, stringifying each value according
   * to its declared Spark SQL data type.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.initSpark(enableHive = true)
    val frame: DataFrame = spark.sql("select * from cx_ads_safe.crowd_labels_result;")
    frame.show()
    frame.printSchema()

    // Walk every column of each row and render its value as a String,
    // dispatching on the column's declared data type.
    val value1: RDD[mutable.Map[String, String]] = frame.rdd.map { (row: Row) =>
      val res: mutable.Map[String, String] = mutable.Map[String, String]()
      for (col <- row.schema.fieldNames) {
        // Resolve the ordinal once per column instead of once per access.
        val idx: Int = row.schema.fieldIndex(col)
        var value: String = ""
        if (!row.isNullAt(idx)) {
          value = row.schema(col).dataType match {
            case StringType => row.getString(idx)
            // Match ANY array type. The previous pattern only matched
            // ArrayType(StringType, containsNull = true); arrays with
            // containsNull = false or non-string elements fell through to the
            // default case, where the erased getAs result was assigned to a
            // String and threw ClassCastException at runtime.
            case ArrayType(_, _) => row.getSeq[Any](idx).mkString(",")
            // Numeric / boolean / other scalar types: stringify explicitly
            // instead of relying on an unchecked getAs[String] cast.
            case _ => String.valueOf(row.get(idx))
          }
        }
        res.put(col, value)
      }
      res
    }
    // NOTE: foreach runs on the executors — in cluster mode this prints to the
    // executor logs, not the driver console. Collect first for local debugging.
    value1.foreach(println)
    spark.close()
  }

  /**
   * Configures the Elasticsearch-Hadoop connector on the session and converts
   * the DataFrame into (documentId, fieldMap) pairs ready for bulk indexing.
   * The actual write (EsSpark.saveToEs) is currently commented out.
   *
   * @param spark active SparkSession whose runtime conf receives the ES settings
   * @param frame source DataFrame; its "id" column is used as the ES document id
   */
  def toEs(spark: SparkSession, frame: DataFrame): Unit = {
    val conf: RuntimeConfig = spark.conf
    // ES connection settings
    conf.set("es.nodes", "127.0.0.1")
    conf.set("es.port", "9200")
    conf.set("es.index.auto.create", "true")
    conf.set("es.nodes.wan.only", "true")
    // Bulk-write tuning: batch size, retry policy, refresh behaviour, concurrency
    conf.set("es.batch.size.bytes", "1000000")
    conf.set("es.batch.write.retry.wait", "5000")
    conf.set("es.batch.write.retry.count", "10")
    conf.set("es.batch.write.refresh", "false")
    conf.set("es.batch.write.concurrent.requests", "10")

    val rdd: RDD[(String, Map[String, Any])] = dataFrameToEsMap(frame, "id")
    // Executor-side debug print; remove before enabling the real write below.
    rdd.foreach(println)

    //写入es
    //EsSpark.saveToEs(rdd, "up_test_index_20230825/doc")
  }

  /**
   * Converts a DataFrame into (esId, contentMap) pairs for Elasticsearch.
   * Rows whose columns are all null/blank are dropped, and null/blank fields
   * are omitted from each document's content map.
   *
   * @param dataFrame  source data
   * @param esIdColumn name of the column used as the ES document id; it is
   *                   excluded from the content map. Defaults to null (no id
   *                   column — every row's esId is then null).
   * @return RDD of (documentId, field -> value) pairs
   */
  private def dataFrameToEsMap(dataFrame: DataFrame, esIdColumn: String = null): RDD[(String, Map[String, Any])] = {
    val fields: Array[String] = dataFrame.schema.fields.map((field: StructField) => field.name)

    // Drop rows from Hive where every single field is null or blank.
    val filteredRDD: RDD[Row] = dataFrame.rdd.filter { (row: Row) =>
      fields.exists { (field: String) =>
        val v = row.getAs[Any](field)
        v != null && v.toString.trim.nonEmpty
      }
    }

    filteredRDD.map { (row: Row) =>
      val esId: String = row.getAs[String](esIdColumn)
      val esContentMap: Map[String, Any] = fields
        .filterNot((_: String) == esIdColumn)
        .flatMap { (field: String) =>
          // Fetch the value once (the old code called getAs three times per field).
          Option(row.getAs[Any](field))
            .filter(_.toString.trim.nonEmpty)
            .map((v: Any) => (field, v))
        }
        .toMap
      (esId, esContentMap)
    }
  }

  /**
   * Persists a single aggregated value string to the local MySQL test table.
   * The value is wrapped in brackets as a JSON-array-like literal.
   * NOTE(review): connection closing is presumably handled inside
   * JdbcUtil.operateStatement — confirm, otherwise this leaks a connection.
   *
   * @param value payload to store under the fixed id "1001"
   */
  def stringToMysql(value: String): Unit = {
    // TODO: consider storing as a list to reduce space usage.
    JdbcUtil.operateStatement(JdbcUtil.getConnection("LOCAL_MYSQL"), "INSERT",
      "INSERT INTO ug_test(id,value) VALUES(?,?);",
      "1001", "[" + value + "]")
  }

  /**
   * Debug helper: reads a test table, prints it as JSON, and reports the
   * optimizer's estimated in-memory size of the DataFrame.
   *
   * @param spark active SparkSession
   */
  def tempInfo(spark: SparkSession): Unit = {
    val sql = "select * from leal_test.student_info;"
    val frame: DataFrame = SparkUtil.getDataFrameBySql(spark, sql)
    // frame.printSchema()
    frame.show()
    // Render the whole frame as JSON strings (collects to the driver — test data only).
    val result: Dataset[String] = frame.toJSON
    println(result.collect().mkString(","))
    // Estimated frame size from the optimized logical plan's statistics.
    val value: BigInt = spark.sessionState.executePlan(frame.queryExecution.logical).optimizedPlan.stats.sizeInBytes
    println(value, FileUtils.byteCountToDisplaySize(value.toLong))
    //stringToMysql(result.collect().mkString(","))
  }

  /**
   * Flattens the wide crowd_labels table into (id, up_labels, primary_key):
   * each "yes" ("是") flag column contributes its label name to the up_labels
   * array, and the result overwrites cx_ads_safe.crowd_labels_result.
   *
   * Assumes the source column layout is: fields(0) = id, fields(last) =
   * primary_key, and every column in between is a "<prefix>_<label>" flag —
   * TODO confirm against the Hive schema; a flag column without an underscore
   * would make split('_')(1) throw.
   *
   * @param spark active SparkSession (Hive-enabled)
   */
  def dealHiveInfo(spark: SparkSession): Unit = {
    val frame: DataFrame = spark.sql("select * from cx_ads_safe.crowd_labels;")
    frame.show()
    val fields: Array[String] = frame.schema.fieldNames
    fields.foreach(println)

    val value: RDD[Row] = frame.rdd.map { (row: Row) =>
      val upLabels: ArrayBuffer[String] = ArrayBuffer[String]()
      // Skip fields(0) (id) and the last field (primary_key); collect the
      // label part (after the first underscore) of every column flagged "是".
      for (i <- 1 to fields.length - 2) {
        if ("是".equals(row.getAs[Any](fields(i)))) {
          upLabels.append(fields(i).split('_')(1))
        }
      }
      // Empty label sets are stored as SQL NULL rather than an empty array.
      Row(row.getAs("id"), if (upLabels.isEmpty) null else upLabels, row.getAs("primary_key"))
    }

    val newSchemaInfo: StructType = StructType(
      List(
        StructField("id", StringType, nullable = false),
        StructField("up_labels", ArrayType(StringType), nullable = true),
        StructField("primary_key", StringType, nullable = false)
      )
    )
    // Executor-side debug print.
    value.foreach(println)

    spark.createDataFrame(value, newSchemaInfo)
      .write.mode(SaveMode.Overwrite)
      .saveAsTable("cx_ads_safe.crowd_labels_result")
  }
}