package com.edata.bigdata.hdfs

import com.edata.bigdata.annotation.Edata_Executor
import com.edata.bigdata.util.{EDataUtils, Executor}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.ipc.StandbyException
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.io.FileNotFoundException
import scala.util.control.Breaks.{break, breakable}

@Edata_Executor(target = "HDFSEXECUTOR")
class SparkHdfsExecutor extends Executor {

  override var SESSION: SparkSession = _
  // Hadoop FileSystem handle bound to the active NameNode; set in createHdfsClient().
  var CLIENT: FileSystem = _
  // Capacity reported by the active NameNode; stays -1 until a client is created.
  var CLIENT_CAPACITY: Long = -1L
  // "hdfs://<active-namenode>" prefix prepended to every relative path argument.
  var ACTIVE_URL: String = _
  val URI_PREFIX = "hdfs://"


  override def initialize(): Unit = {
    createHdfsClient()
  }

  /**
   * Loads the text file at `path` (relative to the active NameNode) and parses it
   * into a DataFrame using `seperator` as the field delimiter.
   *
   * @throws FileNotFoundException when the path does not exist on HDFS.
   */
  override def findDataFromPath[T](path: String, seperator: String)(implicit bean: Manifest[T]): DataFrame = {
    if (!fileExisted(ACTIVE_URL + path)) {
      throw new FileNotFoundException(s"File path does not exist: ${ACTIVE_URL + path}")
    }
    LOGGER.info(s"finding data from ${path}")
    val rdd = SESSION.sparkContext.textFile(ACTIVE_URL + path)
    createDataFrame(rdd, seperator)
  }

  /** Writes `data` as a text file under `path` on the active NameNode. */
  override def saveDataToPath[T](data: RDD[String], path: String)(implicit bean: Manifest[T]): Unit = {
    LOGGER.info(s"saving data to ${path}")
    data.saveAsTextFile(ACTIVE_URL + path)
  }

  /** Varargs facade: args(0) = path, args(1) = field separator. */
  override def findData[T](args: String*)(implicit bean: Manifest[T]): DataFrame = {
    // BUG FIX: previously only checked isEmpty, so a single-element args
    // crashed with IndexOutOfBoundsException on args(1).
    requireArgs(args, 2)
    findDataFromPath(args(0), args(1))
  }

  /** Varargs facade: args(0) = target path. */
  override def saveData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    requireArgs(args, 1)
    saveDataToPath(data, args(0))
  }

  /**
   * Replaces the file at args(0) with `data`: deletes the existing path
   * (recursively) first, then writes the new contents.
   */
  override def updateData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    requireArgs(args, 1)
    val p = ACTIVE_URL + args(0)
    if (fileExisted(p)) {
      LOGGER.info(s"file:${p} exist,deleting...")
      // deleteOnExit is unsuitable here: the file system would not remove
      // the file immediately, so the subsequent save would collide.
      CLIENT.delete(new Path(p), true)
    }
    saveDataToPath(data, args(0))
  }

  /**
   * Loads the file at args(0) (separator args(1)) and filters rows whose id
   * column equals `value`. The schema's id entry is encoded as
   * "&lt;columnName&gt;:&lt;dataType&gt;".
   */
  override def findDataById[T](value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2)
    val data = findDataFromPath(args(0), args(1))
    val className = bean.toString()
    val (_, id, _) = find_SCHEMA(className)
    val k_t = id.split(":")
    // BUG FIX: the original filtered on the raw "name:type" string and ignored
    // `value` entirely (it passed the column name as the value). Filter the id
    // column against `value`, converted according to the id column's data type.
    data.filter(s"${k_t(0)}==${EDataUtils.convertVToQueryStrByDataType(value, k_t(1))}")
  }

  /**
   * Loads the file at args(0) (separator args(1)) and filters rows whose
   * `key` column equals `value` (compared as a string).
   */
  override def findDataByProperty[T](key: String, value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2)
    val data = findDataFromPath(args(0), args(1))
    data.filter(s"${key}==${EDataUtils.convertVToQueryStrByDataType(value, "string")}")
  }

  /** Fails fast when fewer than `required` varargs were supplied. */
  private def requireArgs(args: Seq[String], required: Int): Unit = {
    if (args.length < required) {
      throw new IllegalArgumentException(
        s"args parameter should not be empty: expected at least ${required} args, got ${args.length}")
    }
  }

  /** True when `path` exists on the HDFS cluster. */
  def fileExisted(path: String): Boolean = {
    CLIENT.exists(new Path(path))
  }

  /**
   * Probes each NameNode listed in HDFS_ENTRYPOINT (comma separated) and binds
   * CLIENT / ACTIVE_URL / CLIENT_CAPACITY to the first active one.
   *
   * @throws IllegalStateException when no active NameNode is found, instead of
   *         silently leaving CLIENT pointed at a standby node.
   */
  def createHdfsClient(): Unit = {
    val configuration = new Configuration()
    breakable {
      for (nn <- HDFS_ENTRYPOINT.split(",").map(_.trim).filter(_.nonEmpty)) {
        configuration.set("fs.defaultFS", URI_PREFIX + nn)
        try {
          // FileSystem.get is inside the try so a connection failure to one
          // NameNode lets the loop fall through to the next candidate.
          CLIENT = FileSystem.get(configuration)
          // getStatus throws StandbyException on a standby NameNode, so this
          // call doubles as the "is active" probe.
          CLIENT_CAPACITY = CLIENT.getStatus.getCapacity
          ACTIVE_URL = URI_PREFIX + nn
          LOGGER.info(nn + " is active")
          break
        } catch {
          case _: StandbyException =>
            LOGGER.warn(nn + " is not active")
        }
      }
    }
    // BUG FIX: the original returned normally even when every NameNode was in
    // standby, leaving ACTIVE_URL null and every later path "null/...".
    if (ACTIVE_URL == null) {
      throw new IllegalStateException(s"no active NameNode found among: ${HDFS_ENTRYPOINT}")
    }
  }

}
