package com.kingsoft.dc.khaos.module.spark.preprocess.transform

import java.util.regex.Pattern

import com.google.common.base.Splitter
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.dsl.utils.UdfUtils
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.ColumnType
import com.kingsoft.dc.khaos.module.spark.metadata.preprocess.transform.{Extract_Fields, SplitInfo, Split_After_Fields}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.functions.{col, _}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}

/**
  * create by yansu on 2020/03/25 10:53
  */
class Split extends TransformStrategy with Logging with Serializable {
  // Parsed split configuration; populated by exec() and read later by schema().
  private var _splitInfo: SplitInfo = null

  // Names of the supported split strategies for a split_config entry.
  object SplitMode extends Serializable {
    val NGINX = "nginx"
    val JSON = "json"
    val FIXED_LENGTH = "fixed_length"
    val CHARACTER = "character"
  }

  /**
    * Entry point: parses the module config into a [[SplitInfo]], applies the
    * configured split(s) to the upstream DataFrame, and registers the result
    * on the first target edge.
    */
  override def exec(kc: KhaosContext,
                    module_id: String,
                    config: String,
                    dependences: Seq[Dependency],
                    targets: Seq[Dependency]) = {
    val spark = kc.sparkSession
    implicit val formats = DefaultFormats
    // Parse the JSON config and keep it around for schema().
    val parsedInfo: SplitInfo = parse(config, true).extract[SplitInfo]
    this._splitInfo = parsedInfo
    // The input frame comes from the dependency whose targetSeq is "0".
    val inputEdge = dependences.filter(_.targetSeq.equalsIgnoreCase("0")).head.getEdge()
    val inputFrame: DataFrame = kc.structData[DataFrame](inputEdge)._2
    val splitFrame = dataSplit(kc, inputFrame, parsedInfo)
    addResult(targets.head, splitFrame)
  }

  /**
    * Applies every configured split operation, in order, to the incoming
    * frame. Each split_config entry dispatches on its split_mode.
    *
    * NOTE(review): an entry with a null/empty mode resets the accumulator
    * back to the ORIGINAL input frame, discarding earlier splits — preserved
    * from the original implementation; confirm this is intended.
    */
  def dataSplit(kc: KhaosContext, data: DataFrame, splitInfo: SplitInfo): DataFrame = {
    var current = data
    for (option <- splitInfo.split_config) {
      val mode = option.split_mode.get
      val field = option.split_field.get
      val character = option.split_character.get
      val custom = option.custom_character.get
      val length = option.split_length.get
      val afterFields: List[Split_After_Fields] = option.split_after_fields
      if (mode == null || mode == "") {
        current = data
      } else {
        current = mode match {
          case SplitMode.NGINX =>
            nginxSplit(current, field, afterFields, splitInfo.extract_fields)
          case SplitMode.CHARACTER =>
            charSplit(current, field, character, custom, afterFields, splitInfo.extract_fields)
          case SplitMode.FIXED_LENGTH =>
            leagthSplit(current, field, length, afterFields, splitInfo.extract_fields)
          case SplitMode.JSON =>
            jsonSplit(current, field, afterFields, splitInfo.extract_fields)
          case _ => throw new Exception("暂不支持拆解该类型的日志!")
        }
      }
    }
    current
  }

  /*
  Parses each field's expression and builds the projection columns.

  For every entry in split_after_fields:
    - no expression: project the raw split column, cast to the declared type;
    - expression: parse it into (udfName, args) via UdfUtils.extractUDF and
      build a callUDF column, distinguishing constant args from field args.
  Fields whose expression references another computed field are recorded in
  nestColumn/nestMap and re-selected in a second pass so the dependency is
  evaluated against already-projected columns. Type-2 UDFs (IP lookups) are
  collected in `map` and applied last via reflection on the whole DataFrame.
   */
  def getSplitColumn(data: DataFrame,
                     split_after_fields: List[Split_After_Fields],
                     extract_Fields: List[Extract_Fields]) = {
    var reslutDataFrame: DataFrame = data
    // Columns to project; starts with the pass-through extract fields.
    var listColumn = ListBuffer[Column]()
    // Names of fields whose expression depends on another computed field.
    var nestColumn = ListBuffer[String]()
    // Index of such a field -> its fully built UDF column.
    val nestMap = new mutable.HashMap[Int, Column]()

    for (elem <- extract_Fields) {
      listColumn += col(elem.field).as(elem.field)
    }
    // Names of the user-configured output fields.
    var fields_arr = ListBuffer[String]()
    for (elem <- split_after_fields) {
      fields_arr += elem.field
    }
    // Type-2 UDFs: output field name -> "udfName-argument".
    val map = new mutable.HashMap[String, String]()
    for (i <- 0 to split_after_fields.size - 1) {
      val split_after_fields_name = split_after_fields(i).field
      val split_after_fields_type = split_after_fields(i).data_type
      if (split_after_fields(i).option == "" || split_after_fields(i).option == null) {
        // No expression: cast the raw column to its declared type.
        listColumn += col(split_after_fields_name).cast(getDataType(split_after_fields_type)).as(split_after_fields_name)
      } else { // Field is computed by an expression.
        val (udfName, argsList): (String, List[String]) = UdfUtils.extractUDF(split_after_fields(i).option)
        if (udfName != null) { // Expression is not a constant.
          // Arguments keyed by their position so ordering can be restored.
          var argColumnConstant = new mutable.HashMap[Int, Column]() // constant arguments
        var argColumnVariable = new mutable.HashMap[Int, Column]() // field (column) arguments
        var tmp_ = 0
          if (argsList.size > 1) { // multiple arguments
            for (arg <- argsList) {
              val (bol_, col_) = argIsConstant(arg)
              if (fields_arr.contains(arg)) { // argument is a field name
                tmp_ += 1
                val index = fields_arr.indexOf(arg)
                argColumnVariable += (argsList.indexOf(arg) -> col(arg).cast(getDataType(split_after_fields(index).data_type)).as(arg))
              } else if (bol_) { // argument is a constant
                argColumnConstant += (argsList.indexOf(arg) -> col_)
              } else {
                throw new IllegalArgumentException(s"未知参数字段:${arg}")
              }
            }
            // get_json takes varargs and needs its arguments packed specially.
            udfNameCaseValue(udfName) match {
              case 1 => {
                // (index, column) pairs keep the argument order intact.
                val MapCol = new mutable.HashMap[Int, Column]
                val arrayCol = new ListBuffer[Column]
                val callCol = new ListBuffer[Column]
                for (elem <- argColumnVariable) {
                  MapCol += (elem._1 -> elem._2)
                }
                for (elem <- argColumnConstant) {
                  MapCol += (elem._1 -> elem._2)
                }
                for (elem <- MapCol.toArray.sortBy(x => x._1)) {
                  arrayCol += elem._2
                }
                // Variadic UDFs receive a single array column.
                callCol += array(arrayCol.toList: _*)
                // Record fields whose expression depends on another computed field.
                for (arg <- argsList) {
                  if (fields_arr.contains(arg)) { // argument is a field name
                    if (!arg.equalsIgnoreCase(split_after_fields_name)) {
                      if (!split_after_fields(fields_arr.indexOf(arg)).option.isEmpty) {
                        nestColumn += split_after_fields_name
                        nestMap.put(i, callUDF(udfName, callCol.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name))
                      }
                    }
                  }
                }
                listColumn += callUDF(udfName, callCol.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name)
              }
              case 2 => {
                throw new IllegalArgumentException("参数数量不正确,只支持解析一列")
              }
              case 3 => { // ordinary UDF
                val callCol = new ListBuffer[Column]
                for (elem <- argColumnVariable) {
                  argColumnConstant += elem._1 -> elem._2
                }
                for (elem <- argColumnConstant.toArray.sortBy(_._1)) {
                  callCol += elem._2
                }
                // Record fields whose expression depends on another computed field.
                for (arg <- argsList) {
                  if (fields_arr.contains(arg)) { // argument is a field name
                    if (!arg.equalsIgnoreCase(split_after_fields_name)) {
                      if (!split_after_fields(fields_arr.indexOf(arg)).option.isEmpty) {
                        nestColumn += split_after_fields_name
                        nestMap.put(i, callUDF(udfName, callCol.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name))
                      }
                    }
                  }
                }
                listColumn += callUDF(udfName, callCol.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name)
              }
            }
          } else if (argsList.size == 1) { // single argument
            udfNameCaseValue(udfName) match {
              case 2 => {
                // IP-lookup UDFs run later via reflection; remember name+arg.
                map.put(split_after_fields_name, udfName + "-" + argsList.head)
                // Placeholder column: real value if the split produced the
                // field, null otherwise; replaced by the reflective call below.
                listColumn += col(split_after_fields_name).cast(getDataType(split_after_fields_type))
              }
              case _ => {
                val arg_colum_only = new ListBuffer[Column]
                val (bol_, col_) = argIsConstant(argsList.head)
                if (fields_arr.contains(argsList.head)) { // argument is a field
                  arg_colum_only += col(argsList.head).cast(getDataType(split_after_fields(i).data_type))
                } else if (bol_) { // argument is a constant
                  arg_colum_only += col_
                } else {
                  throw new IllegalArgumentException(s"未知参数字段:${argsList.head}")
                }
                // Record fields whose expression depends on another computed field.
                for (arg <- argsList) {
                  if (fields_arr.contains(arg)) { // argument is a field name
                    if (!arg.equalsIgnoreCase(split_after_fields_name)) {
                      if (!split_after_fields(fields_arr.indexOf(arg)).option.isEmpty) {
                        nestColumn += split_after_fields_name
                        nestMap.put(i, callUDF(udfName, arg_colum_only.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name))
                      }
                    }
                  }
                }
                listColumn += callUDF(udfName, arg_colum_only.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name)
              }
            }
          } else {
            // Zero-argument UDF.
            listColumn += callUDF(udfName, new Array[Column](0): _*)
          }
        }
      }
    }
    val resColumn = new ListBuffer[Column]
    reslutDataFrame = reslutDataFrame.select(listColumn.toList: _*)
    // Second pass: re-select so columns that depend on other computed columns
    // are evaluated against the already-projected frame.
    for (elem <- nestMap) {
      val callUdf = elem._2
      // NOTE(review): removes elements from listColumn while iterating over
      // it — relies on ListBuffer iteration semantics; verify this is safe.
      for (elem <- listColumn) {
        if (elem.toString().equalsIgnoreCase(callUdf.toString())) {
          listColumn -= callUdf
          resColumn += callUdf
        }
      }
      for (elem <- reslutDataFrame.columns) {
        if (!nestColumn.contains(elem)) {
          resColumn += col(elem)
        }
      }
      reslutDataFrame = reslutDataFrame.select(resColumn.distinct.toList: _*)
    }
    // Finally apply the type-2 (IP lookup) UDFs by reflection; each takes
    // (DataFrame, argument field, output field) and returns a new DataFrame.
    val udfNameMap = UdfUtils.registerUDFCustom()
    for (elem <- map) {
      val udfNameAndArg = elem._2.split("-")
      val udfName = udfNameAndArg(0)
      val args = udfNameAndArg(1)
      if (udfNameMap.contains(udfName)) {
        val clazz = Class.forName("com.kingsoft.dc.khaos.dsl.spark.udf.custom.Ks3UdfCustom")
        val method = clazz.getMethod(udfName, classOf[DataFrame], classOf[String], classOf[String])
        val instance = clazz.newInstance()
        reslutDataFrame = method.invoke(instance, reslutDataFrame, args, elem._1).asInstanceOf[DataFrame]
      } else {
        throw new IllegalArgumentException(s"${udfName} 未注册")
      }
    }
    reslutDataFrame
  }

  /**
    * Classifies a UDF by name:
    *  1 = variadic UDFs (arguments are packed into a single array column),
    *  2 = IP-lookup UDFs invoked reflectively on the whole DataFrame,
    *  3 = ordinary UDFs.
    *
    * (Idiom fix: returns the match expression directly instead of threading
    * the result through a mutable var.)
    */
  def udfNameCaseValue(udfName: String): Int = udfName match {
    // variadic udfs
    case "get_json" | "concat_plus" => 1
    // DataFrame-level udfs dispatched via reflection
    case "get_IpState" |
         "get_IpProvince" |
         "get_IpCity" |
         "get_IpOperators" => 2
    // ordinary udfs
    case _ => 3
  }

  /**
    * JSON split: extracts each configured field from a JSON-typed column via
    * json_tuple, then applies the optional per-field UDF expressions.
    *
    * BUG FIXES vs. original:
    *  - the destructured tuple element was named `col`, shadowing
    *    org.apache.spark.sql.functions.col, so field arguments were resolved
    *    via Column.apply on a possibly-null constant column (NPE / wrong
    *    column reference);
    *  - removed a stray `.asJava` appended to a Unit-valued for loop;
    *  - cast before aliasing (the original no-expression branch aliased
    *    first, inconsistent with the rest of the file);
    *  - UDF result columns are now aliased to the target field name, matching
    *    getSplitColumn's behavior.
    */
  def jsonSplit(data: DataFrame,
                split_field: String,
                split_after_fields: List[Split_After_Fields],
                extract_Fields: List[Extract_Fields]) = {
    // Re-serialize the source column so json_tuple can address it uniformly.
    var res = data.withColumn(split_field, to_json(struct(col(split_field))))
    res = res.withColumn(split_field, json_tuple(col(split_field), split_field))
    for (field_info <- split_after_fields) {
      res = res.withColumn(field_info.field, json_tuple(col(split_field), field_info.field))
    }
    // Carry through every original column of the input frame.
    var list_column = ListBuffer[Column]()
    for (elem <- data.columns) {
      list_column += col(elem).as(elem)
    }
    // Names of the user-configured output fields.
    var fields_arr = ListBuffer[String]()
    for (elem <- split_after_fields) {
      fields_arr += elem.field
    }
    for (i <- 0 to split_after_fields.size - 1) {
      val split_after_fields_name = split_after_fields(i).field
      val split_after_fields_type = split_after_fields(i).data_type
      if (split_after_fields(i).option == "" || split_after_fields(i).option == null) {
        // No expression: project the extracted field, cast to declared type.
        list_column += col(split_after_fields_name).cast(getDataType(split_after_fields_type)).as(split_after_fields_name)
      } else { // Field is computed by an expression.
        val (udfName, argsList) = UdfUtils.extractUDF(split_after_fields(i).option)
        if (udfName != null) {
          if (argsList.isEmpty) {
            throw new IllegalArgumentException("表达式参数不能为空!")
          }
          val arg_column = new ListBuffer[Column]()
          for (arg <- argsList) {
            val (isConstant, constantCol) = argIsConstant(arg)
            if (fields_arr.contains(arg)) { // argument is a field
              val index = fields_arr.indexOf(arg)
              arg_column += col(arg).cast(getDataType(split_after_fields(index).data_type))
            } else if (isConstant) { // argument is a constant
              arg_column += constantCol
            } else {
              throw new IllegalArgumentException(s"未知参数字段:${arg}")
            }
          }
          list_column += callUDF(udfName, arg_column.toList: _*).cast(getDataType(split_after_fields_type)).as(split_after_fields_name)
        }
      }
    }
    res.select(list_column.toList: _*)
  }

  /**
    * Maps a Khaos column-type name to the corresponding Spark SQL DataType.
    * Unknown type names fall back to StringType.
    *
    * (Idiom fix: returns the match expression directly instead of assigning
    * through a mutable var initialized to null.)
    */
  def getDataType(dataType: String): DataType = dataType match {
    case ColumnType.STRING => DataTypes.StringType
    case ColumnType.NUMBER => DataTypes.LongType
    case ColumnType.DATE => DataTypes.DateType
    case ColumnType.DECIMAL => DataTypes.DoubleType
    case ColumnType.TIME => DataTypes.StringType
    case ColumnType.DATETIME => DataTypes.TimestampType
    // NOTE(review): "Long" deliberately maps to StringType (same as the
    // catch-all); confirm this is intended before collapsing the case.
    case "Long" => DataTypes.StringType
    case _ => DataTypes.StringType
  }

  /*
  Decides whether a UDF argument is a constant (integer, decimal or quoted
  string) or a variable, and builds the literal Column for constants.
  Returns (isConstant, column); column is null for non-constants.

  BUG FIXES vs. original:
   - the integer pattern used [\d]* and so matched "", "+" and "-",
     making toLong throw NumberFormatException; it now requires a digit;
   - the quote test was `arg.contains("'") && arg.contains("'")` (the same
     condition twice — one side was clearly meant to be the closing quote)
     and the substring mixed the trimmed string with the untrimmed length,
     mis-slicing padded arguments.
   */
  def argIsConstant(arg: String) = {
    val trimmed = arg.trim
    val intPattern = Pattern.compile("^[-\\+]?[\\d]+$") // e.g. 123
    val floatPattern = Pattern.compile("([1-9]+[0-9]*|0)(\\.[\\d]+)?") // e.g. 123.23
    var isConstant = false
    var column: Column = null
    if (intPattern.matcher(trimmed).matches()) {
      isConstant = true
      column = lit(trimmed.toLong).cast(LongType)
    } else if (floatPattern.matcher(trimmed).matches()) {
      isConstant = true
      column = lit(trimmed.toFloat).cast(FloatType)
    } else if (trimmed.length >= 2 &&
      ((trimmed.startsWith("'") && trimmed.endsWith("'")) ||
        (trimmed.startsWith("\"") && trimmed.endsWith("\"")))) {
      // Quoted string constant: strip the surrounding quotes.
      isConstant = true
      column = lit(trimmed.substring(1, trimmed.length - 1)).cast(StringType)
    }
    (isConstant, column)
  }

  /*
  Fixed-length split: cuts the source column into chunks of split_length
  characters (one per split_after_fields entry), then applies the per-field
  expressions.

  BUG FIX: the original guard tested `split_field.equals(null)`, which is
  always false — and itself throws NPE when split_field IS null; replaced
  with a proper null comparison.
   */
  def leagthSplit(
                   data: DataFrame,
                   split_field: String,
                   split_length: String,
                   split_after_fields: List[Split_After_Fields],
                   extract_Fields: List[Extract_Fields]) = {
    if (split_field == null || split_field == "") {
      throw new IllegalArgumentException("分割字段不能为空,请重新选择!")
    }
    var res = leagthDfSplit(data, split_after_fields, split_field, split_length)
    res = getSplitColumn(res, split_after_fields, extract_Fields)
    res
  }

  /**
    * Extends the schema with one string column per split_after_fields entry
    * and fills them by slicing `split_field` into fixed-length chunks per
    * row. Exhausted input yields "" chunks; rows still shorter than the
    * schema are padded with nulls.
    *
    * BUG FIXES vs. original:
    *  - the trailing-partial-chunk branch called
    *    substring(index, index + step) past the end of the string, throwing
    *    StringIndexOutOfBoundsException for any value whose length is not a
    *    multiple of step (the sibling subString_leagth correctly uses
    *    substring(index)); fixed to take the remainder;
    *  - the null-padding loop `0 to ss - rs` ran one iteration too many;
    *  - removed the dead `colArr` buffer.
    *
    * NOTE(review): the chunk loop iterates schema.size times (original
    * columns + new fields), appending more chunks than
    * split_after_fields.size — preserved from the original, whose comments
    * say extra trailing fields are tolerated downstream; confirm whether
    * split_after_fields.size was intended.
    */
  def leagthDfSplit(dataFrame: DataFrame,
                    split_after_fields: List[Split_After_Fields],
                    split_field: String,
                    split_length: String) = {
    var schema: StructType = dataFrame.schema
    for (elem <- split_after_fields) {
      schema = schema.add(elem.field, StringType, true)
    }
    val res = dataFrame.map(row => {
      val expectedSize = schema.size
      val step = split_length.toInt
      var resRow = row
      var index: Int = 0
      val value = row.getAs[String](split_field)
      val valueLength = value.size
      for (_ <- 0 to expectedSize - 1) {
        if (index + step <= valueLength) {
          resRow = Row.merge(resRow, Row(value.substring(index, index + step)))
        } else if (index < valueLength) {
          // Trailing partial chunk: take whatever is left.
          resRow = Row.merge(resRow, Row(value.substring(index)))
        } else {
          // Input exhausted: emit an empty chunk.
          resRow = Row.merge(resRow, Row(""))
        }
        index += step
      }
      // Pad with nulls when the row has fewer fields than the schema
      // (more fields than the schema is tolerated downstream).
      val actualSize = resRow.size
      if (actualSize < expectedSize) {
        for (_ <- 1 to expectedSize - actualSize) {
          resRow = Row.merge(resRow, Row(null))
        }
      }
      resRow
    })(RowEncoder(schema))
    res
  }


  /*
  Fixed-length chunking helper: slices `str` into `saf_size` pieces of
  `split_length` characters each. A trailing partial piece keeps whatever
  characters remain; once the input is exhausted the remaining slots are "".
   */
  def subString_leagth(str: String, saf_size: Int, split_length: String) = {
    val step = split_length.toInt
    val pieces = new Array[String](saf_size)
    var cursor = 0
    var slot = 0
    while (slot < saf_size) {
      pieces(slot) =
        if (cursor + step <= str.length) str.substring(cursor, cursor + step)
        else if (cursor < str.length) str.substring(cursor) // partial tail
        else "" // input exhausted
      cursor += step
      slot += 1
    }
    pieces
  }

  /*
  Delimiter split: splits `split_field` on the chosen delimiter (or the
  user-supplied one when split_char is "custom"), then applies the
  per-field expressions.

  BUG FIX: the original guard tested `split_field.equals(null)`, which is
  always false — and itself throws NPE when split_field IS null; replaced
  with a proper null comparison.
   */
  def charSplit(data: DataFrame,
                split_field: String,
                split_char: String,
                custom_character: String,
                split_after_fields: List[Split_After_Fields],
                extract_Fields: List[Extract_Fields]) = {
    if (split_field == null || split_field == "") {
      throw new IllegalArgumentException("分割字段不能为空,请重新选择!")
    }
    // "custom" selects the user-provided delimiter.
    val delimiter = if (split_char.equalsIgnoreCase("custom")) custom_character else split_char
    var res: DataFrame = charDfSplit(data, split_after_fields, split_field, delimiter)
    res = getSplitColumn(res, split_after_fields, extract_Fields)
    res
  }

  /*
  Nginx access-log split: splits the raw line on double quotes and then
  applies the per-field expressions.

  BUG FIX: the original guard tested `split_field.equals(null)`, which is
  always false — and itself throws NPE when split_field IS null; replaced
  with a proper null comparison. Also dropped an unused buffer that copied
  data.columns.
   */
  def nginxSplit(data: DataFrame,
                 split_field: String,
                 split_after_fields: List[Split_After_Fields],
                 extract_Fields: List[Extract_Fields]) = {
    if (split_field == null || split_field == "") {
      throw new IllegalArgumentException("分割字段不能为空,请重新选择!")
    }
    var res: DataFrame = nginxDfSplit(data, split_after_fields, split_field)
    res = getSplitColumn(res, split_after_fields, extract_Fields)
    res
  }

  /**
    * Extends the schema with one string column per split_after_fields entry
    * and fills them by splitting `split_field` on `splitChar` (the literal
    * two-character sequence "\t" is normalized to a real tab). Rows with
    * fewer fields than the schema are padded with nulls.
    *
    * BUG FIX: the null-padding loop `0 to ss - rs` padded one null too many;
    * it now pads exactly schema.size - row.size fields.
    */
  def charDfSplit(dataFrame: DataFrame,
                  split_after_fields: List[Split_After_Fields],
                  split_field: String,
                  splitChar: String): DataFrame = {
    var schema: StructType = dataFrame.schema
    for (elem <- split_after_fields) {
      schema = schema.add(elem.field, StringType, true)
    }
    // The UI sends the two characters backslash-t for a tab delimiter.
    val delimiter = if (splitChar.equalsIgnoreCase("\\t")) "\t" else splitChar
    val res = dataFrame.map(row => {
      var resRow = row
      val key_value = row.getAs[String](split_field)
      // Split on the delimiter, trimming each token.
      val iter = Splitter.on(delimiter).trimResults.split(key_value).iterator()
      while (iter.hasNext) {
        resRow = Row.merge(resRow, Row(iter.next()))
      }
      // More fields than the schema is tolerated downstream; fewer fields
      // are padded with nulls.
      val actualSize = resRow.size
      val expectedSize = schema.size
      if (actualSize < expectedSize) {
        for (_ <- 1 to expectedSize - actualSize) {
          resRow = Row.merge(resRow, Row(null))
        }
      }
      resRow
    })(RowEncoder(schema))
    res
  }

  /**
    * Extends the schema with one string column per split_after_fields entry
    * and fills them by splitting the raw nginx log line on double quotes
    * (tokens trimmed, empty tokens dropped). Rows with fewer fields than the
    * schema are padded with nulls.
    *
    * BUG FIX: the null-padding loop `0 to ss - rs` padded one null too many;
    * it now pads exactly schema.size - row.size fields.
    */
  def nginxDfSplit(dataFrame: DataFrame,
                   split_after_fields: List[Split_After_Fields],
                   split_field: String): DataFrame = {
    var schema: StructType = dataFrame.schema
    for (elem <- split_after_fields) {
      schema = schema.add(elem.field, StringType, true)
    }
    val res = dataFrame.map(row => {
      var resRow = row
      val key_value = row.getAs[String](split_field)
      // Split on double quotes, trimming tokens and dropping empty ones.
      val iter = Splitter.on("\"").trimResults.omitEmptyStrings().split(key_value).iterator()
      while (iter.hasNext) {
        resRow = Row.merge(resRow, Row(iter.next()))
      }
      // More fields than the schema is tolerated downstream; fewer fields
      // are padded with nulls.
      val actualSize = resRow.size
      val expectedSize = schema.size
      if (actualSize < expectedSize) {
        for (_ <- 1 to expectedSize - actualSize) {
          resRow = Row.merge(resRow, Row(null))
        }
      }
      resRow
    })(RowEncoder(schema))
    res
  }

  /**
    * Reports this module's output schema: built from the configured
    * extract_fields when present, otherwise forwarded from the upstream
    * dependency. Requires exec() to have run first (it sets _splitInfo).
    */
  override def schema(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependences: Seq[Dependency]) = {
    if (_splitInfo.extract_fields.isEmpty) {
      kc.schemaChannel.getSchema(dependences.head.getSource())
    } else {
      val fields = _splitInfo.extract_fields.map { optInfo =>
        KhaosStructField(optInfo.field, optInfo.data_type)
      }
      new Schema(fields)
    }
  }
}
