package com.dataworker.spark.sql.execution

import java.util

import scala.util.control.NonFatal

import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{BytesWritable, Text}
import org.apache.spark.sql.execution.command.LeafRunnableCommand
import org.apache.spark.sql.{DataWorkerSQLException, Row, SparkSession}

import com.dataworker.spark.sql.util.zip.{ProcessFile, ZipFileInputFormat}

case class LoadTableCommand(path: String,
                            tableName: String,
                            options: util.HashMap[String, String]) extends LeafRunnableCommand {

  /**
   * Loads the file at `path` and registers its contents as a temporary view
   * named `tableName`.
   *
   * Supported inputs:
   *  - `.zip` archives of csv/json files: entries are extracted to a per-job
   *    temp directory on HDFS, then read as csv or json according to the
   *    `fileType` option ("csv" or "json"). Zip loads are restricted to the
   *    submitting user's home directory.
   *  - bare `.csv` / `.json` / `.xlsx` files: read directly via Spark readers.
   *
   * @param sparkSession the active session used for reading and view registration
   * @return always an empty `Seq[Row]` (command-style execution)
   * @throws DataWorkerSQLException if `path` does not exist
   * @throws IllegalArgumentException if `spark.datawork.job.userId` is blank (zip only)
   * @throws SecurityException if a zip path lies outside the user's home dir
   * @throws RuntimeException wrapping any failure during zip extraction/reading
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val hadoopConf = sparkSession.sparkContext.hadoopConfiguration
    val fs = FileSystem.get(hadoopConf)
    if (!fs.exists(new Path(path))) {
      throw new DataWorkerSQLException("文件不存在: " + path + ", 可以是HDFS完整路径，或者我的资源下路径")
    }

    if (path.endsWith(".zip")) {
      loadZip(sparkSession, hadoopConf, fs)
    } else if (path.endsWith(".csv")) {
      sparkSession.read.options(options).csv(path).createOrReplaceTempView(tableName)
    } else if (path.endsWith(".json")) {
      sparkSession.read.options(options).json(path).createOrReplaceTempView(tableName)
    } else if (path.endsWith(".xlsx")) {
      sparkSession.read
        .option("dataAddress", "MyTable[#All]")
        .option("useHeader", "true")
        .format("com.crealytics.spark.excel")
        .load(path).createOrReplaceTempView(tableName)
    } else {
      throw new Exception("当前只支持导入json、csv和xlsx文件")
    }
    Seq.empty[Row]
  }

  /**
   * Extracts a zip of csv/json files into a job-scoped HDFS temp directory and
   * registers the extracted data as the temporary view `tableName`.
   * On any failure the temp directory is removed before the error is rethrown.
   */
  private def loadZip(sparkSession: SparkSession, hadoopConf: Configuration, fs: FileSystem): Unit = {
    val zipFileRDD = sparkSession.sparkContext.newAPIHadoopFile(
      path,
      classOf[ZipFileInputFormat],
      classOf[Text],
      classOf[BytesWritable],
      hadoopConf)

    val sparkConf = sparkSession.sparkContext.getConf
    val jobCode = sparkConf.get("spark.datawork.job.code")
    val userId = sparkConf.get("spark.datawork.job.userId")
    if (StringUtils.isBlank(userId)) {
      throw new IllegalArgumentException("spark.datawork.job.userId 不能为空")
    }

    // Zip loads are only permitted from the submitting user's home directory.
    val userHome = s"/user/dataworks/users/$userId"
    if (!path.contains(userHome)) {
      val msg = "您访问的路径被拒绝：" + path + "，你只能访问以下路径文件：" + userHome
      throw new SecurityException(msg)
    }

    val location = s"/user/dataworks/load_temp/$jobCode"
    try {
      // Validate fileType BEFORE running the distributed extraction, so an
      // unsupported format fails fast instead of after a full unpack. The
      // check stays inside the try so the error is wrapped exactly as before.
      val fileType = options.get("fileType")
      if (!"csv".equals(fileType) && !"json".equals(fileType)) {
        throw new DataWorkerSQLException("文件格式不支持：" + fileType + ", zip文件请添加参数 fileType, 指定 csv 或 json")
      }

      // NOTE(review): this closure captures `hadoopConf` (Hadoop Configuration,
      // which is not java.io.Serializable); confirm the deployment provides a
      // serializable wrapper/registered serializer for it.
      zipFileRDD.foreach { entry =>
        val fullName = entry._1.toString
        val fileName = fullName.substring(fullName.lastIndexOf("/") + 1)
        // Skip hidden entries (e.g. "._" archive metadata); only csv/json
        // payloads are written to the temp location.
        if (!fileName.startsWith(".") &&
          (fileName.endsWith(".csv") || fileName.endsWith(".json"))) {
          ProcessFile.write(hadoopConf, fileName, entry._2, location)
        }
      }

      val reader = sparkSession.read.options(options)
      val dataFrame = if ("csv".equals(fileType)) reader.csv(location) else reader.json(location)
      dataFrame.createOrReplaceTempView(tableName)
      // Record the temp dir so a later stage can clean it up after the load.
      sparkSession.conf.set("spark.load.zip.temp.path", location)
    } catch {
      // NonFatal: let InterruptedException/VM errors propagate untouched.
      case NonFatal(e) =>
        // Best-effort cleanup of partially extracted files before rethrowing.
        fs.delete(new Path(location), true)
        throw new RuntimeException(e.getMessage, e)
    }
  }
}
