package com.edata.bigdata.s3a

import com.edata.bigdata.annotation.Edata_Executor
import com.edata.bigdata.util.{EDataUtils, Executor}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.ipc.StandbyException
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.io.FileNotFoundException
import java.net.URI

@Edata_Executor(target = "S3AEXECUTOR")
class SparkS3AExecutor extends Executor {

  // Spark session injected by the framework (declared on the Executor trait).
  override var SESSION: SparkSession = _
  // Hadoop FileSystem client bound to the configured S3A bucket.
  var CLIENT: FileSystem = _
  // Reported capacity of the file system; stays -1 until createMinClient succeeds.
  var CLIENT_CAPACITY: Long = -1L
  // Base URL ("s3a://<bucket>") prepended to every relative path.
  // NOTE(review): remains null when the endpoint is standby — see createMinClient.
  var ACTIVE_URL: String = _

  /** Initializes the executor by creating and probing the S3A client. */
  override def initialize(): Unit = {
    createMinClient()
  }

  /**
   * Fails fast when fewer than `n` positional arguments were supplied.
   * The original methods checked only `args.isEmpty` but then read `args(1)`,
   * which raised a raw IndexOutOfBoundsException for single-element args.
   */
  private def requireArgs(args: Seq[String], n: Int): Unit = {
    if (args.lengthCompare(n) < 0) {
      throw new IllegalArgumentException(
        s"args parameter should contain at least $n element(s), got ${args.length}")
    }
  }

  /** Loads a delimited text file; args = (relative path, field separator). */
  override def findData[T](args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2) // both args(0) and args(1) are read below
    findDataFromPath(args(0), args(1))
  }

  /** Writes `data` as a text file; args = (relative path). */
  override def saveData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    requireArgs(args, 1)
    saveDataToPath(data, args(0))
  }

  /** Overwrites the file at args(0): deletes it when present, then writes `data`. */
  override def updateData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    requireArgs(args, 1)
    val p = ACTIVE_URL + args(0)
    if (fileExisted(p)) {
      LOGGER.info(s"file:${p} exist,deleting...")
      // Delete eagerly: CLIENT.deleteOnExit(path) would defer the removal until
      // the file system closes, so the subsequent save could hit the old data.
      CLIENT.delete(new Path(p), true)
    }
    saveDataToPath(data, args(0))
  }

  /**
   * Loads the text file at ACTIVE_URL + path and builds a DataFrame by splitting
   * each line on `seperator` (delegated to createDataFrame).
   *
   * @throws FileNotFoundException when the path does not exist in the bucket.
   */
  override def findDataFromPath[T](path: String, seperator: String)(implicit bean: Manifest[T]): DataFrame = {
    val fullPath = ACTIVE_URL + path // hoisted: was computed twice
    if (!fileExisted(fullPath)) {
      throw new FileNotFoundException(s"File path does not exist: ${fullPath}")
    }
    val rdd = SESSION.sparkContext.textFile(fullPath)
    createDataFrame(rdd, seperator)
  }

  /** Persists `data` as a text file under ACTIVE_URL + path. */
  override def saveDataToPath[T](data: RDD[String], path: String)(implicit bean: Manifest[T]): Unit = {
    data.saveAsTextFile(ACTIVE_URL + path)
  }

  /**
   * Finds rows whose id column equals `value`; args = (relative path, separator).
   * The id column name and type come from the bean's registered schema; the type
   * drives how `value` is quoted in the filter predicate.
   */
  override def findDataById[T](value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2) // both args(0) and args(1) are read below
    val data = findDataFromPath(args(0), args(1))
    val className = bean.toString()
    // find_SCHEMA yields (_, id, _) where id is "<name>:<type>" — TODO confirm format.
    val (_, id, _) = find_SCHEMA(className)
    data.filter(id + "==" + EDataUtils.convertVToQueryStrByDataType(value, id.split(":")(1)))
  }

  /** Finds rows where column `key` equals `value` (treated as a string); args = (path, separator). */
  override def findDataByProperty[T](key: String, value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2) // both args(0) and args(1) are read below
    val data = findDataFromPath(args(0), args(1))
    data.filter(s"${key}==${EDataUtils.convertVToQueryStrByDataType(value, "string")}")
  }

  /** True when `path` (a fully-qualified URL) exists in the file system. */
  def fileExisted(path: String): Boolean = {
    CLIENT.exists(new Path(path))
  }

  /**
   * Configures the Hadoop S3A connector from the executor's credentials and binds
   * CLIENT to the configured bucket. ACTIVE_URL is set only after a successful
   * capacity probe, so a standby endpoint leaves it null and later path-based
   * calls fail fast on the missing prefix (preserved original behavior).
   */
  def createMinClient(): Unit = {
    val conf = SESSION.sparkContext.hadoopConfiguration // hoisted: was fetched five times
    conf.set("fs.s3a.access.key", S3_USER)
    conf.set("fs.s3a.secret.key", S3_PASSWORD)
    conf.set("fs.s3a.endpoint", S3_ENTRYPOINT)
    conf.set("fs.s3a.connection.ssl.enabled", "false")
    conf.set("fs.s3a.impl", classOf[org.apache.hadoop.fs.s3a.S3AFileSystem].getName)
    val uri = new URI("s3a://" + S3_BUCKET)
    CLIENT = FileSystem.get(uri, conf)
    try {
      // getStatus throws StandbyException when the endpoint is not active.
      CLIENT_CAPACITY = CLIENT.getStatus.getCapacity
      ACTIVE_URL = "s3a://" + S3_BUCKET
      LOGGER.info(s"${S3_ENTRYPOINT} is active")
    } catch {
      case ex: StandbyException =>
        LOGGER.warn(s"${S3_ENTRYPOINT} is not active")
    }
  }
}
