package com.edata.bigdata.hdfs

import com.edata.bigdata.annotations.Edata_Saver
import com.edata.bigdata.basic.Saver
import com.edata.bigdata.utils.DataFrameUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.ipc.StandbyException
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.io.FileNotFoundException
import scala.util.control.Breaks.{break, breakable}

@Edata_Saver(target = "HDFSSAVER")
@Edata_Saver(target = "HDFSSAVER")
class HdfsSaver extends Saver with HdfsConnector {
  override var sourceType: String = "HDFS"
  override var session: SparkSession = _
  // Hadoop FileSystem handle bound to the currently active NameNode.
  private var client: FileSystem = _
  // Capacity reported by the active NameNode; -1L means "not connected yet".
  private var client_capacity: Long = -1L
  // Full URI prefix (URI_PREFIX + host) of the NameNode that answered as active.
  private var active_namenode: String = _

  /** (Re)creates the HDFS client when it is missing or its last status probe failed. */
  private def ensureClient(): Unit = {
    if (client == null || client.getStatus().getCapacity < 0) {
      createHdfsClient()
    }
  }

  /*
   * Appends `data` as CSV to the file at args(0), creating it on first write.
   *
   * args(0): path of the file to save (appended to the active NameNode URI)
   *
   * @return true on success, false when args is empty
   */
  override def save(data: DataFrame, args: String*): Boolean = {
    ensureClient()
    if (args.isEmpty) {
      LOGGER.info(s"args should not be empty")
      return false
    }
    val filePath = active_namenode + args(0)
    // First write creates the file; subsequent writes append to it.
    val writeMode = if (fileExisted(filePath)) "append" else "overwrite"
    data.write.mode(writeMode).csv(filePath)
    true
  }

  /*
   * Updates rows of the CSV file at args(0) with matching rows from `data`.
   *
   * args(0): path of the file to update
   * args(1): field delimiter
   * args(2): join key used to match rows, typically "id"
   * args(3): comma-separated column names of the stored file
   * args(4): comma-separated column types
   * args(5): comma-separated nullability flags
   *
   * @return true on success, false when args is empty
   * @throws FileNotFoundException when the target file does not exist
   */
  override def update(data: DataFrame, args: String*): Boolean = {
    ensureClient()
    if (args.isEmpty) {
      LOGGER.info(s"args should not be empty")
      return false
    }
    val filePath = active_namenode + args(0)
    if (!fileExisted(filePath)) {
      throw new FileNotFoundException(s"file does not exist: ${filePath}")
    }
    val colNames = args(3).split(",")
    // The incoming DataFrame's columns must be a subset of the file's columns.
    // BUG FIX: the original compared the StructField OBJECT against the join-key
    // string args(2), which never matched; compare the field NAME against colNames.
    data.schema.fields.foreach { f =>
      if (!colNames.contains(f.name)) {
        throw new Exception(s"column ${f.name} is not existed in ${args(3)}")
      }
    }
    val colTypes = DataFrameUtils.parseDataTypeByStr(args(4).split(","))
    val nullables = DataFrameUtils.parseBooleanByStr(args(5).split(","))

    val rdd = session.sparkContext.textFile(filePath)
    val dataInHdfs = DataFrameUtils.createDataFrame(session, rdd, args(1), colNames, colTypes, nullables)
    val joinData = dataInHdfs.as("a").join(data.as("b"), Seq(args(2)), "leftouter")
    val updatedCols = data.schema.fields.map(_.name)
    // coalesce(b.col, a.col): take the incoming value when present, otherwise keep the stored one.
    val selectExpr = colNames.map { colName =>
      if (updatedCols.contains(colName)) {
        coalesce(column(s"b.${colName}"), column(s"a.${colName}"))
      } else {
        column(s"a.${colName}")
      }
    }
    val updatedData = joinData.select(selectExpr: _*)
    // BUG FIX: materialize before overwriting — Spark reads lazily, so overwriting
    // the input path without forcing evaluation deletes the data mid-read.
    updatedData.persist()
    updatedData.count()
    // BUG FIX: write back with the same delimiter the file was read with; the
    // original wrote the default comma, corrupting files using another separator.
    updatedData.write.mode("overwrite").option("sep", args(1)).csv(filePath)
    updatedData.unpersist()
    true
  }

  /*
   * Removes from the CSV file at args(0) every row that also appears in `data`.
   * Takes the same args as `update` (args(2) is unused by the anti-join itself).
   *
   * @return true on success, false when args is empty
   * @throws FileNotFoundException when the target file does not exist
   */
  override def delete(data: DataFrame, args: String*): Boolean = {
    ensureClient()
    if (args.isEmpty) {
      LOGGER.info(s"args should not be empty")
      return false
    }
    val filePath = active_namenode + args(0)
    if (!fileExisted(filePath)) {
      throw new FileNotFoundException(s"file does not exist: ${filePath}")
    }
    val colNames = args(3).split(",")
    // Same subset check as `update` (see the BUG FIX note there).
    data.schema.fields.foreach { f =>
      if (!colNames.contains(f.name)) {
        throw new Exception(s"column ${f.name} is not existed in ${args(3)}")
      }
    }
    val colTypes = DataFrameUtils.parseDataTypeByStr(args(4).split(","))
    val nullables = DataFrameUtils.parseBooleanByStr(args(5).split(","))
    val rdd = session.sparkContext.textFile(filePath)
    val dataInHdfs = DataFrameUtils.createDataFrame(session, rdd, args(1), colNames, colTypes, nullables)
    val usingCols = data.schema.fields.map(_.name).toSeq
    // leftanti keeps only the stored rows that have NO match in `data`.
    val joinData = dataInHdfs.join(data, usingCols, "leftanti")
    // Materialize before overwriting the path we just read from (see `update`).
    joinData.persist()
    joinData.count()
    joinData.write.mode("overwrite").option("sep", args(1)).csv(filePath)
    joinData.unpersist()
    true
  }

  override def saveOrUpdate(data: DataFrame, args: String*): Boolean = {
    ???
  }

  /**
   * Probes every NameNode listed in HDFS_ENTRYPOINT (comma-separated) and binds
   * `client`/`active_namenode` to the first one that answers as active.
   * A standby NameNode rejects the status call with StandbyException.
   */
  def createHdfsClient(): Unit = {
    val conf = new Configuration()
    breakable {
      for (nn <- HDFS_ENTRYPOINT.split(",")) {
        conf.set("fs.defaultFS", URI_PREFIX + nn)
        client = FileSystem.get(conf)
        try {
          // getStatus contacts the NameNode, so this only succeeds on the active one.
          client_capacity = client.getStatus.getCapacity
          active_namenode = URI_PREFIX + nn
          LOGGER.info(s"${nn} is active")
          // BUG FIX: the original never called break(), so the LAST reachable
          // NameNode won instead of the first active one.
          break()
        } catch {
          case ex: StandbyException =>
            LOGGER.error(s"${nn} is not active")
        }
      }
    }
  }

  /** Returns true when `path` exists on the connected filesystem. */
  def fileExisted(path: String): Boolean = {
    val file = new Path(path)
    client.exists(file)
  }
}
