package com.sugon.dataexport

import java.io.{File, FileInputStream}
import java.nio.charset.StandardCharsets
import java.security.MessageDigest
import java.text.SimpleDateFormat
import java.util
import java.util.{Date, stream}

import org.apache.spark.sql.functions._
import cn.hutool.crypto.digest.MD5
import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}

import scala.collection.mutable.ArrayBuffer
import scala.io.{BufferedSource, Source}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StructField, StructType, _}

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer


/**
  * Submit example:
  * spark-submit --master yarn --num-executors 3 --files /opt/sugon/ww /opt/sparkdemoscala-1.0-SNAPSHOT.jar cs.json
  *
  * Workflow:
  * 1. Receive the job-configuration file (JSON, e.g. cs.json).
  * 2. Parse the configuration (reader/writer parameters).
  * 3. Load the source file into a DataFrame according to the reader settings.
  *
  * Known issue: character-encoding handling is not fully resolved yet
  * (see readCSV for the manual-decoding workaround).
  */
object DataExport {

  /**
    * Entry point: builds a SparkSession, parses the (currently inlined) job
    * configuration, and loads the configured source file into a DataFrame.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder().appName("read config file")
      .master("local")
      //      .enableHiveSupport()
      .getOrCreate()

    // Release the SparkSession even if the job fails part-way through
    // (the original leaked the session on any exception before close()).
    try {
      //    val fileName = args(0)

      //    val content: String = readJob(new File(fileName))

      // Inline sample job description — same layout a cs.json passed on the
      // command line would have.
      val content = "{\"job\": {\"content\": [{\"reader\": {\"parameter\": {\"sourcePath\": \"文件来源\", \"defaultFS\": \"hdfs://xxx:port\", \"fileType\": \"csv\", \"encoding\": \"UTF-8\", \"fieldDelimiter\": \",\", \"header\": \"true\", \"column\": [{\"name\": \"id\", \"type\": \"String\"}, {\"name\": \"name\", \"type\": \"String\"}, {\"name\": \"age\", \"value\": \"int\"} ] }, \"name\": \"sparkReader\"}, \"writer\": {\"parameter\": {\"dbName\":\"ysk\", \"tableName\": \"abc\", \"partition\": [\"month\", \"year\"], \"storeStyle\": \"orc\"}, \"name\": \"sparkWriter\"} } ] } }"

      val jobParams: ConfigurationJson = getJsonConfiguration(content)

      // Source file format (e.g. csv).
      val fileType: String = jobParams.getString("reader.parameter.fileType", "csv")
      // Field delimiter.
      val fieldDelimiter: String = jobParams.getString("reader.parameter.fieldDelimiter", ",")
      // Whether the source file carries a header row ("true"/"false" as text).
      val header: String = jobParams.getString("reader.parameter.header", "false")
      // Source file path.
      val sourcePath: String = jobParams.getString("reader.parameter.sourcePath")
      // File-system prefix, e.g. hdfs://host:port.
      val defaultFS: String = jobParams.getString("reader.parameter.defaultFS")
      // Column definitions (name / type entries from the reader config).
      val columns: util.List[Object] = jobParams.getList("reader.parameter.column")

      val columnsScala: mutable.Buffer[Object] = columns.asScala

      val result: DataFrame = spark.read.format(fileType)
        .option("delimiter", fieldDelimiter)
        .option("header", header)
        .option("quote", "'")
        .option("nullValue", "\\N")
        .load(defaultFS + sourcePath)

      result.printSchema()

      /**
        * With a header row the column names come from the file itself;
        * otherwise rename the auto-generated _c0.._cN columns using the names
        * configured under reader.parameter.column.
        */
      val frame: DataFrame = if ("true".equalsIgnoreCase(header)) {
        result
      } else {
        // BUG FIX: Dataset.toDF takes one name per column (String*). The old
        // code joined every name into a single "(a,b,c)" string, which only
        // matches a one-column frame — and mangles the name even then.
        // Expand the configured names as varargs instead.
        val columnNames: Seq[String] =
          columnsScala.map(_.asInstanceOf[JSONObject].getString("name")).toSeq
        result.toDF(columnNames: _*)
      }

      // Hand-written schema experiment — kept (commented out) together with
      // the commented-out code below that consumed it; it was dead otherwise.
      //    val arrayBuffer = ArrayBuffer(
      //      StructField("Id", StringType, nullable = true),
      //      StructField("mac", StringType, nullable = true),
      //      StructField("brand", StringType, nullable = true),
      //      StructField("memo3", StringType, nullable = true)
      //    )

      //    val schema = StructType(arrayBuffer.toList)
      //
      //
      //    spark.createDataFrame(result.rdd, schema)


      //
      //    // Custom UDF functions for audit columns.
      //    val localTime: () => Long = () => {
      //      val date = new java.util.Date
      //      date.getTime / 1000
      //    }
      //
      //    val localTimeStamp: () => String = () => {
      //      new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)
      //    }
      //
      //    val addColTimeValue: UserDefinedFunction = udf(localTime)
      //
      //    val addColTimeStamp: UserDefinedFunction = udf(localTimeStamp)
      //
      //    val test_df: DataFrame = frame.select(
      //      md5(concat_ws("", frame("id"), frame.col("name"))).alias("jxk_id"),
      //
      //      frame.col("id"),
      //      frame.col("name"),
      //      frame.col("age"))
      //      .withColumn("yyzyml00001", lit("126"))
      //
      //      .withColumn("sjzyml00003", lit("运营车辆信息"))
      //
      //      .withColumn("jxk_cjd", lit("150200"))
      //
      //      .withColumn("jxk_xxrksj", addColTimeStamp())
      //
      //      .withColumn("jxk_xxrksj_tmsp", addColTimeValue())
      //
      //    test_df.show(false)
      //
      //    //    val col: DataFrame = frame.as('df).select(s"")
      //
      //
      //    test_df.printSchema()

      //    val topic: String = jobParams.getString("reader.parameter.topic")
      //    val servers: String = jobParams.getString("reader.parameter.servers")
      //    val groupId: String = jobParams.getString("reader.parameter.groupId")
      //
      //    val arrayData: RDD[String] = spark.sparkContext.makeRDD(List(topic, servers, groupId))
      //
      //    arrayData.foreach((s: String) => {
      //      println("---------------------" + s)
      //    })

    } finally {
      spark.close()
    }

  }

  /**
    * Reads the whole job-configuration file as a UTF-8 string.
    *
    * BUG FIX: the original issued a single InputStream.read(buf) and ignored
    * the return value; read() is not guaranteed to fill the buffer, so large
    * files could come back silently truncated (trailing NUL bytes).
    * Files.readAllBytes reads the complete content and closes the stream.
    *
    * @param file configuration file to read
    * @return file content decoded as UTF-8
    * @throws RuntimeException wrapping any I/O failure (same contract as before)
    */
  def readJob(file: File): String = {
    import java.nio.file.Files
    try {
      new String(Files.readAllBytes(file.toPath), StandardCharsets.UTF_8)
    } catch {
      case e: Exception =>
        throw new RuntimeException(e)
    }
  }

  /**
    * Extracts the first entry of job.content from the raw job JSON and wraps
    * it in a ConfigurationJson for dotted-path lookups
    * (e.g. "reader.parameter.fileType").
    *
    * @param jobConfig full job JSON document
    * @return configuration view over job.content[0]
    */
  def getJsonConfiguration(jobConfig: String): ConfigurationJson = {
    val reader: String = JSON.parseObject(jobConfig)
      .getJSONObject("job")
      .getJSONArray("content")
      .getJSONObject(0)
      .toString

    ConfigurationJson.from(reader)
  }

  /**
    * Reads a CSV file with an explicit character encoding. Spark's built-in
    * csv reader decodes as UTF-8, so lines are decoded manually from the raw
    * Text bytes delivered by hadoopFile.
    *
    * @param spark        active session
    * @param headerSchema "true" (any capitalization) when the caller-supplied
    *                     names in mySchema should be used; otherwise columns
    *                     are named _c0.._cN
    * @param mySchema     column names; must cover every column when used
    * @param code         charset name of the source file (e.g. GBK)
    * @param file         input path
    * @return DataFrame of all-String columns, header row excluded
    */
  def readCSV(spark: SparkSession, headerSchema: String, mySchema: ArrayBuffer[String], code: String, file: String): DataFrame = {
    val rddArr: RDD[Array[String]] = spark.sparkContext
      .hadoopFile(file, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
      // Decode each line with the requested charset instead of Spark's UTF-8 default.
      .map((pair: (LongWritable, Text)) => new String(pair._2.getBytes, 0, pair._2.getLength, code))
      // Split on commas that are outside quotes so quoted cells containing
      // commas do not shift columns; -1 keeps trailing empty fields.
      .map((_: String).trim.split(",(?=([^\"]*\"[^\"]*\")*[^\"]*$)", -1))

    val fieldArr: Array[String] = rddArr.first()
    // Hoisted loop-invariant: the header row's concatenation was recomputed
    // per record inside the filter; mkString also cannot throw on an empty
    // array, unlike reduce.
    val headerKey: String = fieldArr.mkString
    // Drop the header row. Row.fromSeq (not Row(_)) — Row(arr) would wrap the
    // whole array in a single-field Row and break createDataFrame(rddRow, schema).
    val rddRow: RDD[Row] = rddArr
      .filter(_.mkString != headerKey)
      .map(Row.fromSeq(_))

    val schemaList: ArrayBuffer[StructField] = ArrayBuffer[StructField]()
    // Consistent with main(): accept any capitalization of "true"
    // (previously only the exact string "TRUE" matched).
    if ("true".equalsIgnoreCase(headerSchema)) {
      for (i <- fieldArr.indices) {
        println("fieldArr(i)=" + fieldArr(i))
        schemaList.append(StructField(mySchema(i), DataTypes.StringType))
      }
    } else {
      for (i <- fieldArr.indices) {
        schemaList.append(StructField(s"_c$i", DataTypes.StringType))
        println("fieldArr(i)=" + fieldArr(i))
      }
    }
    val schema = StructType(schemaList)
    spark.createDataFrame(rddRow, schema)
  }


}
