package com.ctbri.manage.bydeequ.calculate
import org.apache.spark.SparkContext
import org.apache.spark.sql.{Column, Dataset, Row, SaveMode, SparkSession}
import org.apache.spark.sql.functions._
import com.ctbri.manage.quality.config.YamlReader
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}
import org.yaml.snakeyaml.Yaml

import java.io.{File, FileInputStream}
import java.util
import scala.collection.JavaConverters.asScalaBuffer
/**
 * @author songyunlong
 * @createTime 2023/6/12 10:09
 * @description Loads a dataset (Hive table or Excel file) according to a YAML
 *              config, casts its columns to configured types, and optionally
 *              saves the result back to Hive.
 */
case class DataOverallOp(configPath: String = null, needSaveToHive: Boolean = true) {
    // Parse the YAML config file once and publish the parsed map through
    // YamlReader's per-path instance so later key lookups read from it.
    YamlReader.getInstance(this.configPath).setConfForScala(
        new Yaml().load(new FileInputStream(new File(this.configPath)))
            .asInstanceOf[util.Map[String, AnyRef]])

    // Cache a single reader handle instead of re-resolving the instance for every key.
    private val reader = YamlReader.getInstance(this.configPath)

    val conf = reader.getConfForScala
    println("file path of main config:")
    println(conf)

    // Job-level settings read from the YAML config.
    val appName = reader.getString("appName")
    val masterSet = reader.getString("masterSet")
    val logLevel = reader.getString("logLevel")
    val dataExportPath = reader.getString("dataExportPath")
    val dataSource = reader.getString("dataSource")
    val databaseName = reader.getString("databaseName")
    val tableName = reader.getString("tableName")
    val newDataframeName = reader.getString("newDataframeName")
    // Parallel lists: columnNames(i) is cast to the type named by columnTypes(i).
    val columnNames = reader.getList[String]("columnNames")
    val columnTypes = reader.getList[String]("columnTypes")

    // TODO: read the remaining optional config fields (e.g. "others").

    /**
     * Maps a type name from the config file to the Spark SQL [[DataType]] used for casting.
     * Unrecognised names fall back to NullType (the cast then yields nulls).
     */
    def matchTypes(colType: String): DataType = colType match {
        case "string" => DataTypes.StringType
        case "int" => DataTypes.IntegerType
        case "long" => DataTypes.LongType
        case "double" => DataTypes.DoubleType
        // TODO: support more type names (date, decimal, boolean, ...)
        case _ => DataTypes.NullType
    }

    /**
     * Casts each configured column of `data` to its configured type, prints the
     * resulting schema and a 20-row sample, and — when `needSaveToHive` is set —
     * appends the result to `databaseName.newDataframeName` as an ORC table.
     *
     * @param data the input dataset whose columns are to be re-typed
     * @return the dataset with columns cast to the configured types
     */
    def columnTypeTransform(data: Dataset[Row]): Dataset[Row] = {
        // Pair each column name with its target type up front. zip replaces the
        // previous mutable index counter: no call-order coupling, and a config
        // with mismatched list lengths no longer throws IndexOutOfBoundsException
        // (extra entries on either side are simply ignored).
        val namesWithTypes = asScalaBuffer(this.columnNames).toSeq
            .zip(asScalaBuffer(this.columnTypes).toSeq)
        val result = namesWithTypes.foldLeft(data) { case (df, (name, typeName)) =>
            df.withColumn(name, col(name).cast(this.matchTypes(typeName)))
        }
        result.printSchema()
        result.show(numRows = 20, truncate = false)
        // Tables NOT created through the Hive CLI use format "orc"; tables created
        // through the Hive CLI require format "Hive" instead.
        if (this.needSaveToHive)
            result.write.format("orc").mode(SaveMode.Append)
                .saveAsTable(s"${this.databaseName}.${this.newDataframeName}")
        result
    }

    /** SparkSession built once at construction; Hive support only for the "hive" source. */
    val getSpark = this.dataSource match {
        case "hive" => this.envSetHive(appName = this.appName, masterSet = this.masterSet, logLevel = this.logLevel)
        case _ => this.envSet(appName = this.appName, masterSet = this.masterSet, logLevel = this.logLevel)
    }

    /**
     * Loads the dataset named by the config's `dataSource` ("excel" or "hive").
     *
     * @param structType currently unused; kept for interface compatibility
     *                   (a schema-driven Excel reader may use it later)
     * @return the loaded dataset, or null for an unrecognised data source —
     *         NOTE(review): callers must guard against the null case
     */
    def getDataset(structType: StructType = null): Dataset[Row] = this.dataSource match {
        case "excel" => this.getDataFromExcel(path = this.dataExportPath, spark = this.getSpark)
        case "hive" => this.getDataFromHive(spark = this.getSpark, databaseName = this.databaseName, tableName = this.tableName)
        case _ => null
    }

    /**
     * Runs `func` against this instance's SparkSession, always stopping the
     * session afterwards and clearing the driver-port property so a fresh
     * session can be created in the same JVM.
     */
    def withSpark(func: SparkSession => Dataset[Row]): Unit = {
        val session = this.getSpark
        session.sparkContext.setCheckpointDir(System.getProperty("java.io.tmpdir"))
        try {
            func(session)
        } finally {
            session.stop()
            System.clearProperty("spark.driver.port")
        }
    }

    /** Builds a plain SparkSession with the given app name, master URL, and log level. */
    def envSet(appName: String, masterSet: String, logLevel: String): SparkSession = {
        val spark = SparkSession.builder.appName(appName)
            .master(masterSet)
            .getOrCreate
        spark.sparkContext.setLogLevel(logLevel)
        spark
    }

    /**
     * Builds a Hive-enabled SparkSession. `masterSet` is intentionally not applied
     * here — presumably the master is supplied externally (e.g. via spark-submit)
     * when running against Hive; TODO confirm.
     */
    def envSetHive(appName: String, masterSet: String, logLevel: String): SparkSession = {
        val spark = SparkSession.builder.appName(appName)
            .enableHiveSupport.getOrCreate
        spark.sparkContext.setLogLevel(logLevel)
        spark
    }

    /** Reads an Excel file with a header row, empty cells as nulls, and an inferred schema. */
    def getDataFromExcel(path: String, spark: SparkSession): Dataset[Row] = {
        spark.read
            .format("com.crealytics.spark.excel")
            .option("header", "true")
            .option("treatEmptyValuesAsNulls", "true")
            // Let the Excel connector infer column types from the data.
            .option("inferSchema", "true")
            .load(path)
    }

    /** Reads a full Hive table via `select *`. */
    def getDataFromHive(spark: SparkSession, databaseName: String, tableName: String): Dataset[Row] = {
        val sql = "select * from " + databaseName + "." + tableName
        spark.sql(sql)
    }

    // TODO: add a schema-driven Excel reader (explicit StructType instead of inferSchema).
    // TODO: support further config options from configPath, e.g. partitioning.
}

 object DataOverallOp {
     /** Entry point: args(0) is the path of the YAML config file. */
     def main(args: Array[String]): Unit = {
         // Change needSaveToHive to true when running on the cluster.
         val op = DataOverallOp(configPath = args(0), needSaveToHive = false)
         op.withSpark { _ => op.columnTypeTransform(op.getDataset()) }
     }
 }
