package cn.seecoder.ai.utils


import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.{Column, DataFrame, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Component

@Component
class TestSpark {
  @Autowired
  var sparkSession: SparkSession = _

  /**
   * Reads a Parquet file from HDFS, prints its first rows to stdout, and
   * returns a success message.
   *
   * @param hdfsPath HDFS URI of the Parquet file to read. Defaults to the
   *                 previously hard-coded path so existing callers of
   *                 `readHdfs()` are unaffected.
   *                 NOTE(review): the default path ends in `.csv` but is read
   *                 with the Parquet reader — confirm the file is actually
   *                 Parquet-encoded, or the read will fail at runtime.
   * @return a fixed success message (Chinese for "read succeeded!")
   */
  def readHdfs(hdfsPath: String = "hdfs://172.29.7.102:9000/user-space/fanya/543545.csv"): String = {
    val df = sparkSession.read.parquet(hdfsPath)
    df.show()
    "读取成功！"
  }
}

object TestSparkSql {

  // Curious behavior: when run via Spring with all Spark dependencies marked
  // as "provided", everything works; when run directly via main (with
  // "provided" removed), the generated getters/setters stop working.

  val log: Logger = LoggerFactory.getLogger(getClass)

  /**
   * Reads a Parquet dataset from HDFS and, for each column, prints its name,
   * data type, and the percentage of null values.
   *
   * Fixes over the previous version:
   *  - `Long / Long` integer division always truncated the null ratio to 0
   *    (unless the column was entirely null); now uses floating-point division.
   *  - The ratio is scaled by 100 to match the `%` formatting in the output.
   *  - `df.count()` was re-executed (a full scan) for every column; it is
   *    loop-invariant and is now computed once.
   *  - Guards against division by zero on an empty dataset.
   *  - The SparkSession is stopped in a `finally` so the JVM exits cleanly.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("test")
      .getOrCreate()

    try {
      val fileUri = "hdfs://172.29.7.102:9000/user-space/fanya/parquet/user-1/西瓜训练集888"
      val df: DataFrame = sparkSession.read.parquet(fileUri).persist()
      // Print the schema in a tree format
      df.printSchema()

      log.info("打印")

      // Loop-invariant: one count for the whole dataset, not one per column.
      val totalRowCount = df.count()

      for (column <- df.columns) {
        val dataType = df.schema(column).dataType

        // Null percentage: floating-point division (Long / Long truncates),
        // scaled to percent; 0.0 for an empty dataset.
        val nullRowCount = df.filter(df(column).isNull).count()
        val nullRatio =
          if (totalRowCount == 0) 0.0
          else nullRowCount.toDouble / totalRowCount * 100

        println(s"Column Name: $column")
        println(s"Data Type: $dataType")
        println(f"Null Percentage: $nullRatio%.2f%%")
      }
    } finally {
      // Release the local Spark context so the process can terminate.
      sparkSession.stop()
    }
  }
}
