package cn.seecoder.ai.service

import cn.seecoder.ai.utils.HdfsHelper
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Service

import java.util.Properties

@Service
class FileScalaService {

  // Spring-injected shared SparkSession (field injection; initialized by the container).
  @Autowired
  var sparkSession: SparkSession = _

//  @Autowired
//  var hdfsHelper: HdfsHelper = _

  /**
   * Exports a JDBC table to a single header-ed CSV file at `fileUri`,
   * then re-reads the CSV as a sanity check.
   *
   * @param fileUri   output directory URI for the CSV (overwritten if it exists)
   * @param url       JDBC connection URL
   * @param user      database user
   * @param password  database password
   * @param tableName table to export
   * @return the `fileUri` that was written
   */
  def saveTable(fileUri: String,
                url: String,
                user: String,
                password: String,
                tableName: String): String = {

    val connProps = new Properties()
    connProps.put("user", user)
    connProps.put("password", password)

    val table: DataFrame = sparkSession.read
      .jdbc(url, tableName, connProps)

    // Debug visibility: print the first rows of what we are about to export.
    table.show(20)

    // coalesce(1) so exactly one CSV part file is produced in the target directory.
    table.coalesce(1).write
      .mode(SaveMode.Overwrite)
      .option("header", "true")
      .csv(fileUri)

    // Sanity check: re-read the CSV we just wrote and show it.
    val reread = sparkSession.read
      .option("header", "true")
      .csv(fileUri)
    reread.show(20)

    fileUri
  }

  /**
   * Convenience overload that exports the local `config` table with default
   * connection settings. Delegates to the parameterized overload.
   *
   * NOTE(review): credentials and URL are hard-coded — move them to
   * application configuration rather than source.
   *
   * @param fileUri output directory URI for the CSV
   * @return the `fileUri` that was written
   */
  def saveTable(fileUri: String): String =
    saveTable(
      fileUri,
      // Fixed "&&" -> "&": a doubled ampersand yields an empty query
      // parameter in the JDBC URL.
      "jdbc:mysql://localhost:3306/seecoder_ai?characterEncoding=utf8&serverTimezone=Asia/Shanghai",
      "root",
      "123456",
      "config")

}
