
//import org.apache.spark.sql.UDFRegistration
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
import org.apache.spark.sql.Column
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.apache.spark.rdd.RDD
import org.apache.commons.lang3.StringUtils

import scala.collection.mutable.Map
import scala.collection.JavaConversions._

import com.databricks.spark.xml._

import java.nio.file.{Files, Path, Paths, StandardCopyOption}
import java.io.{BufferedInputStream, File, FileInputStream, FileOutputStream, IOException, InputStream, FileReader, FileWriter, BufferedWriter, OutputStreamWriter}
import java.util.Properties
//import java.time.LocalDate 
import java.sql.DriverManager
import java.sql.Connection
import java.sql.Date


object Input {
  // Loads merged CEB303 customs XML messages with spark-xml and appends the
  // extracted enterprise info, order heads, order lines (and optionally the
  // raw message payload) to JDBC staging tables. Configuration is read from
  // application.properties in the process working directory.

  /*
    implicit class OpsNum(val str: String) extends AnyVal {
      def isNumeric() = scala.util.Try(str.toDouble).isSuccess
    }
    */

    // Cached application.properties; loaded lazily on first readProp() call.
    var prop:Properties = null
    // UUID-generating UDF; assigned in main() BEFORE any fetch_* call runs.
    var uuidUdf: UserDefinedFunction = null
    // JDBC writer options shared by every insert(); populated in main().
    val jdbcOpt = scala.collection.mutable.Map[String,String]()
    //var NumericTypeString = Array("ByteType", "DecimalType", "DoubleType", "FloatType", "IntegerType", "LongType", "ShortType")
    // 32-hex-char surrogate-key generator (random UUID with dashes stripped).
    val obtainUUid = () => java.util.UUID.randomUUID().toString().replace("-","")

    // Connection and path settings; readProp throws if a key is missing.
    val dbUrl = readProp("database.url")
    val dbDriver = readProp("database.driver")
    val dbSsl= readProp("database.useSSL")
    val dbUser= readProp("database.user")
    val dbPsw= readProp("database.password")
    val encode= readProp("xml.encode")
    val xmlInputDir = readProp("xml.input.dir")
    val xmlOutputDir = readProp("xml.output.dir")
    val xmlMergeDir = readProp("xml.merge.dir")


  // Row layout of the dc_message_data audit table (see fetch_message_dat).
  val MsgDataSchema: StructType = StructType(
      List(
        StructField("OID", StringType),
        StructField("SERIAL_NO", StringType),
        StructField("DOC_TYPE", StringType)
        ,StructField("BIZ_TYPE", StringType)
        ,StructField("FILE_NAME", StringType)
        ,StructField("STORAGE_TIME", DateType)
        ,StructField("BIG_DATA", StringType)
        ,StructField("PROCESSING_TIME", DateType)
        ,StructField("ENT_CODE", StringType)
        ,StructField("ENT_NAME", StringType)
        ,StructField("STATUS", StringType)
        ,StructField("DOC_OID", StringType)
      )
    )

    @throws[IOException]
    def readProp(name: String): String = {
        // Lazily load application.properties from the working directory on the
        // first lookup; subsequent calls reuse the cached Properties object.
        if (prop == null) {
            prop = new Properties
            loadCurrPathProp(prop)
        }

        // A missing key is treated as a fatal configuration error.
        Option(prop.getProperty(name)) match {
            case Some(value) => value
            case None =>
                throw new NullPointerException("read properties key={" + name + "}  is null!")
        }
    }
 
    @throws[IOException]
    private def loadCurrPathProp(prop: Properties): Unit = {
        // Loads "application.properties" from the process working directory
        // into `prop`. Silently does nothing when the file does not exist.
        val fileName = System.getProperty("user.dir") + File.separator + "application.properties"
        println(fileName) // echo the resolved path for troubleshooting

        val file = new File(fileName)
        if (file.exists) {
            val in = new BufferedInputStream(new FileInputStream(file))
            // BUG FIX: the original never closed the stream, leaking a file
            // handle on every (re)load.
            try {
                prop.load(in)
            } finally {
                in.close()
            }
        }
    }

  /**
   * Appends `df` to the JDBC table `tableName` using the supplied connection
   * options.
   *
   * BUG FIX: the original mutated the caller's map (`jdbcOpt += "dbtable"`),
   * permanently adding the last-used table name to the object-wide shared
   * option map. The per-call table name is now merged into a local immutable
   * copy instead, leaving the argument untouched.
   */
  def insert(df: DataFrame, jdbcOpt: Map[String, String], tableName: String): Unit = {
    df.show // debug echo of what is about to be written (note: costs an extra Spark job)
    val options = jdbcOpt.toMap + ("dbtable" -> tableName)
    df.write.format("jdbc").options(options).mode("append").save()
  }

  /*
  def selectNaNColumn(df: DataFrame, columns:String*) :DataFrame = {
    var tbl :DataFrame = df
    val fields = tbl.columns.map(_.toUpperCase)
    for( i <- columns){
      if(fields.contains(i.toUpperCase)){
        //tbl = tbl.withColumn()
      }
    }
  }
  */

  /**
   * Keeps only the rows whose `colName` value looks like an unsigned decimal
   * number (digits with an optional fractional part). When the column does
   * not exist (case-insensitive check) the frame is returned untouched.
   */
  def filterNbr(tbl: DataFrame, colName: String): DataFrame = {
    val upperCols = tbl.columns.map(_.toUpperCase)
    if (upperCols.contains(colName.toUpperCase))
      tbl.filter(col(colName).rlike("""^[0-9]+\.?[0-9]*$"""))
    else
      tbl
  }

  /**
   * Renames column `colName` to `alias` (copy then drop the original).
   * A no-op when the names are equal, the source column is missing, or the
   * target column already exists (all checks case-insensitive).
   */
  def attachColumn(df: DataFrame, colName: String, alias: String): DataFrame = {
    val upperCols = df.columns.map(_.toUpperCase)
    val shouldRename =
      colName != alias &&
        upperCols.contains(colName.toUpperCase) &&
        !upperCols.contains(alias.toUpperCase)
    if (shouldRename) df.withColumn(alias, col(colName)).drop(colName) else df
  }
  /**
   * Replaces string column `colName` with a DateType column named `alias`,
   * parsed via colNameToDate. Returns `df` unchanged when the source column
   * is absent (case-insensitive check).
   */
  def attachColumnDate(df: DataFrame, colName: String, alias: String): DataFrame = {
      val present = df.columns.map(_.toUpperCase).contains(colName.toUpperCase)
      if (present)
        df.withColumn(alias, colNameToDate(colName)).drop(colName)
      else
        df
  }

  /**
   * Normalises an order-line frame: numeric sanity filters, camelCase →
   * UPPER_SNAKE column renames, a fresh OID per row, and a projection onto
   * the dc_order_list column set.
   */
  def selectColumn(df: DataFrame): DataFrame = {
    // Rows whose value in these columns is not an unsigned decimal are
    // dropped (columns that do not exist are skipped by filterNbr).
    val numericChecks = List("qty", "price", "totalPrice", "gnum")
    // source column -> target column (attachColumn skips missing sources).
    val renames = List(
      "itemNo"       -> "ITEM_NO",
      "itemName"     -> "ITEM_NAME",
      "itemDescribe" -> "ITEM_DESCRIBE",
      "barCode"      -> "BAR_CODE",
      "totalPrice"   -> "TOTAL_PRICE"
    )

    val filtered = numericChecks.foldLeft(df)((acc, c) => filterNbr(acc, c))
    val renamed  = renames.foldLeft(filtered) { case (acc, (from, to)) => attachColumn(acc, from, to) }

    renamed
      .withColumn("OID", uuidUdf())
      .selectExpr("ITEM_NO", "gnum", "unit", "currency", "price", "qty", "note", "OID",
        "ITEM_NAME", "ITEM_DESCRIBE", "BAR_CODE", "TOTAL_PRICE")
  }

  /**
   * Archives one raw XML message into dc_message_data: envelope fields from
   * BaseTransfer, the OrderHead guid as SERIAL_NO, and the verbatim file body
   * as BIG_DATA.
   *
   * @param filePath path of the message file (read whole via wholeTextFiles)
   * @param docOid   OID of the related dc_order_head row
   * @param status   processing status flag to store (e.g. "S")
   * @return the OID of the inserted row
   *
   * BUG FIXES vs original: the method declared String but always returned
   * null — it now returns the generated OID; the three envelope fields are
   * fetched with a single Spark action instead of three separate `.first` jobs.
   */
  def fetch_message_dat(spark: SparkSession, df: DataFrame, filePath: Path, docOid: String, status: String): String = {
    val base = df.select("BaseTransfer.*").select("bizType", "copCode", "copName").first
    val bizType = base.apply(0).toString
    val copCode = base.apply(1).toString
    val copName = base.apply(2).toString

    val guid = df.select("Order.*").select("OrderHead.*").select("guid").first.apply(0).toString

    // Raw file body, stored verbatim.
    val xml = spark.sparkContext.wholeTextFiles(filePath.toString).first._2

    val oid = obtainUUid()
    val now = new Date(System.currentTimeMillis())

    // Single-row frame matching MsgDataSchema column order.
    val row: RDD[Row] = spark.sparkContext.parallelize(Seq(Row(oid, guid, "0", bizType,
      filePath.getFileName.toString, now, xml, now, copCode, copName, status, docOid)))

    insert(spark.createDataFrame(row, MsgDataSchema), jdbcOpt, "dc_message_data")

    oid
  }

  /**
   * Extracts the distinct (copCode, copName) enterprise pairs from the
   * BaseTransfer envelope, keys each with a fresh OID, and appends them to
   * dc_ent_info.
   */
  def fetch_baseTrans(df: DataFrame) {
    val entities = df
      .select("BaseTransfer.*")
      .selectExpr("copCode as ENT_CODE", "copName as ENT_NAME")
      .distinct
      .withColumn("OID", uuidUdf())
    insert(entities, jdbcOpt, "dc_ent_info")
  }

  /**
   * Parses a 14-digit yyyyMMddHHmmss string column into a DateType Column
   * (time-of-day is discarded by to_date).
   *
   * BUG FIX: the original pattern "yyyyMMddHHmmSS" used 'SS', which is the
   * fraction-of-second/millisecond field in the legacy SimpleDateFormat
   * parser (main() sets timeParserPolicy=LEGACY); seconds are 'ss'.
   */
  def colNameToDate(columnName: String): Column = {
    to_date(col(columnName).cast("String"), "yyyyMMddHHmmss")
  }

  /**
   * Normalises the OrderHead struct (numeric filters, renames, appTime parsed
   * to a date, fresh OID), appends it to dc_order_head, and returns the OID
   * of the first row that was written.
   */
  def fetch_head(df: DataFrame): String = {
    var tbl: DataFrame = df.select("Order.*").select("OrderHead.*")
    tbl = filterNbr(tbl, "freight")
    tbl = filterNbr(tbl, "price")
    tbl = filterNbr(tbl, "goodsValue")
    tbl = attachColumn(tbl, "ebpName", "EBP_NAME")
    tbl = attachColumn(tbl, "orderNo", "ORDER_NO")
    tbl = attachColumn(tbl, "ebpCode", "EBP_CODE")
    tbl = attachColumnDate(tbl, "appTime", "app_time")
    tbl = attachColumn(tbl, "imgPath", "IMG_PATH")
    tbl = attachColumn(tbl, "appStatus", "app_status")
    tbl = tbl.withColumn("OID", uuidUdf())

    tbl = tbl.select(
        col("appType").alias("app_type"),
        col("app_time"),
        col("app_status"),
        col("orderType").alias("ORDER_TYPE"),
        col("ORDER_NO"),
        col("EBP_CODE"),
        col("EBP_NAME"),
        col("ebcCode").alias("EBC_CODE"),
        col("ebcName").alias("EBC_NAME"),
        col("goodsValue").alias("GOODS_VALUE"),
        col("IMG_PATH"),
        col("guid").alias("guid"),
        col("freight").alias("FREIGHT"),
        col("currency").alias("CURRENCY"),
        col("note").alias("NOTE"),
        col("OID")
      )

    // BUG FIX: uuidUdf() is re-evaluated on every Spark action. Without
    // persisting, the OID appended by insert() and the OID re-read below come
    // from two separate plan executions and would NOT match. Persist so both
    // actions observe the same generated values.
    tbl.persist()
    try {
      insert(tbl, jdbcOpt, "dc_order_head")
      tbl.select("OID").first.apply(0).toString
    } finally {
      tbl.unpersist()
    }
  }

  /**
   * Loads the order lines into dc_order_list. Depending on the XML shape,
   * OrderList is inferred either as an array of structs (many lines: explode
   * first) or as a single struct (one line). Any other inferred type is
   * silently ignored, as in the original.
   */
  def fetch_list(df: DataFrame) {
    val listCol = df.select("Order.*").select("OrderList")
    listCol.schema.fields(0).dataType match {
      case _: ArrayType =>
        insert(selectColumn(listCol.select(explode(col("OrderList")))).select("col.*"), jdbcOpt, "dc_order_list")
      case _: StructType =>
        insert(selectColumn(listCol.select("OrderList.*")), jdbcOpt, "dc_order_list")
      case _ => // nothing to insert
    }
  }

  /**
   * Parses one merged CEB303 file with spark-xml and loads its pieces:
   * enterprise info, order head and order lines. The raw-message archive and
   * file-move steps are currently disabled.
   */
  def process(spark: SparkSession, path: Path) {
    val message = spark.read.option("rowTag", "CEB303Message").xml(path.toString())

    fetch_baseTrans(message)
    val headOid = fetch_head(message) // kept for the disabled archive call below
    fetch_list(message)

    //fetch_message_dat(spark, message, path, headOid, "S")

    //println(Files.move( path, Paths.get("output" + File.separator + path.getFileName()), StandardCopyOption.REPLACE_EXISTING))
  }

  /**
   * Writes a single placeholder run-log row into dc_message_log.
   *
   * Counters are currently hard-coded to zero and begin == end == now; only
   * the generated OID and status "S" carry information. (The call site in
   * main() is currently commented out.)
   *
   * BUG FIXES vs original: the Connection and Statement were never closed
   * (leak), and the SQL was built by string interpolation — now a
   * parameterised PreparedStatement with resources closed in finally blocks.
   * The bound values are the same strings the interpolation produced.
   */
  def log(): Unit = {
    val oid = obtainUUid()

    val now = new Date(System.currentTimeMillis())
    val begin = now
    val end = now

    val status = "S"
    val total = 0
    val avg = 0
    val cnt = 0
    val success = 0
    val fail = 0

    val connection = DriverManager.getConnection(dbUrl, dbUser, dbPsw)
    try {
      val ps = connection.prepareStatement(
        "insert into dc_message_log values(?, ?, ?, ?, ?, ?, ?, ?, ?)")
      try {
        ps.setString(1, oid)
        ps.setString(2, begin.toString) // java.sql.Date.toString => yyyy-MM-dd
        ps.setString(3, end.toString)
        ps.setString(4, status)
        ps.setString(5, total.toString)
        ps.setString(6, avg.toString)
        ps.setString(7, cnt.toString)
        ps.setString(8, success.toString)
        ps.setString(9, fail.toString)
        ps.executeUpdate()
      } finally {
        ps.close()
      }
    } finally {
      connection.close()
    }
  }

  /**
   * Concatenates every *.xml file under xml.input.dir into a single
   * UUID-named file in the working directory and returns its path.
   *
   * Each input line is written preceded by a newline, so the merged file
   * starts with one blank line — unchanged from the original behaviour.
   *
   * BUG FIXES vs original: the writer was not closed on exception, the
   * DirectoryStream was never closed, and one scala.io.Source file handle
   * leaked per input file; all are now closed in finally blocks.
   */
  def mergeXml: Path = {
    val outputXmlPath = Paths.get(obtainUUid() + ".xml")
    val bw = new BufferedWriter(
      new OutputStreamWriter(new FileOutputStream(outputXmlPath.toFile, true), "UTF-8"))
    try {
      val dirStream = Files.newDirectoryStream(Paths.get(xmlInputDir))
      try {
        dirStream
          .filter(_.getFileName.toString.endsWith(".xml"))
          .map(_.toAbsolutePath)
          .foreach { filePath =>
            // Read with the configured encoding and append line by line.
            val source = scala.io.Source.fromFile(filePath.toFile, encode)
            try {
              source.getLines().foreach { line =>
                bw.write("\n")
                bw.write(line)
              }
            } finally {
              source.close()
            }
          }
      } finally {
        dirStream.close()
      }
    } finally {
      bw.close()
    }

    outputXmlPath
  }

  /**
   * Returns the path of the first *.xml file found under xml.merge.dir.
   *
   * BUG FIX: the original called .head and only then tested the result for
   * null — on an empty directory .head throws NoSuchElementException before
   * the intended "No merge xml found" error could ever fire, and a missing
   * directory (listFiles == null) crashed with an NPE. Both cases now raise
   * the intended Exception.
   */
  def getXmlMergePath: Path = {
    val files = Option(Paths.get(xmlMergeDir).toFile.listFiles).getOrElse(Array.empty[File])
    files.filter(_.getName.endsWith(".xml")).headOption match {
      case Some(f) => Paths.get(f.getAbsolutePath)
      case None    => throw new Exception("No merge xml found")
    }
  }

  // Print a coarse wall-clock marker ("<SimpleName>: <date>") used to
  // bracket a run for rough timing.
  def printTimestamp {
    val stamp = new java.util.Date()
    println(s"${getClass.getSimpleName}: $stamp")
  }

  def main(args: Array[String]): Unit = {
    // Entry point: locate the pre-merged XML, parse it with spark-xml and
    // load the extracted tables over JDBC. Timestamps bracket the run.

    printTimestamp

    //val path = mergeXml
    // Expects at least one .xml file already present under xml.merge.dir.
    val path = getXmlMergePath

    val spark = SparkSession.builder.getOrCreate()
    import spark.implicits._
    // LEGACY keeps SimpleDateFormat semantics for colNameToDate's pattern.
    spark.sql("set spark.sql.legacy.timeParserPolicy=LEGACY")

    // Must be assigned before any fetch_* call evaluates uuidUdf().
    uuidUdf= udf(obtainUUid)

    // Connection options shared by every insert(); "dbtable" is added per call.
    jdbcOpt += ("url"->dbUrl, "useSSL"->dbSsl,"driver"->dbDriver,"user"->dbUser,"password"->dbPsw)

    process(spark, path)

    //path.toFile.delete

    spark.stop

    //log
    printTimestamp

  }

}
