package com.kingsoft.dc.khaos.module.spark.preprocess.transform

import java.util.regex.Pattern

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.dsl.utils.UdfUtils
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.preprocess.transform.NewFilterAndMapInfo
import com.kingsoft.dc.khaos.module.spark.util.SparkJobHelper
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.functions.{callUDF, lit}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

/**
  * Map/filter transform strategy: rebuilds a DataFrame from the configured
  * field list (`extract_fields`), evaluating per-column UDF expressions and
  * constants, then optionally applies a row filter predicate.
  *
  * Created by chengguangqing on 2019/6/26.
  */
class NewFilterAndMap extends TransformStrategy with Logging {

  // Parsed module configuration. Written by exec() and later read by schema().
  // NOTE(review): schema() will NPE if the framework ever invokes it before
  // exec() has run — confirm the scheduler guarantees exec-before-schema.
  private var filterAndMapInfo: NewFilterAndMapInfo = _
  // NOTE(review): appears unused within this class.
  private var nodeId: String = "00000"

  /**
    * Executes the map/filter step.
    *
    * @param kc          job context providing upstream structured data
    * @param module_id   id of this module instance (unused here)
    * @param config      JSON configuration, deserialized to [[NewFilterAndMapInfo]]
    * @param dependences upstream edges; only the head is consumed
    * @param targets     downstream edges; only the head receives the result
    * @return the (edge, DataFrame) pairs produced by addResult
    * @throws Exception wrapping any failure during column mapping
    */
  override def exec(kc: KhaosContext,
                    module_id: String,
                    config: String,
                    dependences: Seq[Dependency],
                    targets: Seq[Dependency]): Seq[(String, DataFrame)] = {
    // Parse the JSON config (second arg: useBigDecimalForDouble = true).
    implicit val formats = DefaultFormats
    filterAndMapInfo = parse(config, true).extract[NewFilterAndMapInfo]

    // Build the column-mapped DataFrame from the single upstream dependency,
    // wrapping any failure so the scheduler sees which operator broke.
    val mappedDF =
      try {
        val (_, dataFrame) = kc.structData[DataFrame](dependences.head.getEdge())
        buildDataFrame(kc, dataFrame, filterAndMapInfo)
      } catch {
        case e: Exception =>
          throw new Exception("===>map算子执行失败:", e)
      }

    // Apply the optional row filter after the column mapping (same ordering as
    // before: filter errors are intentionally NOT wrapped by the catch above).
    val finalDF =
      if (StringUtils.isNotBlank(filterAndMapInfo.filter)) mappedDF.filter(filterAndMapInfo.filter)
      else mappedDF

    addResult(targets.head, finalDF)
  }

  /**
    * Reports the output schema of this module.
    *
    * When no fields are configured the upstream schema is passed through
    * unchanged; otherwise the schema is derived from the configured fields.
    */
  override def schema(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependencies: Seq[Dependency]): Schema = {
    if (filterAndMapInfo.extract_fields.isEmpty) {
      kc.schemaChannel.getSchema(dependencies.head.getSource())
    } else {
      val fields = filterAndMapInfo.extract_fields.map { optInfo =>
        KhaosStructField(optInfo.field, optInfo.data_type)
      }
      new Schema(fields)
    }
  }

  /**
    * Parses a single field expression and turns it into a Spark [[Column]].
    *
    * Supported argument forms inside a UDF call, e.g. `concat(a1,1,'拼接')`:
    *   - an existing source column (matched case-insensitively),
    *   - a numeric literal,
    *   - a single- or double-quoted string literal,
    *   - no arguments at all, e.g. `current_timestamp()`.
    *
    * If the expression is not a UDF call it is treated as a constant and
    * delegated to [[SparkJobHelper.expressionConstantConvert]].
    *
    * @param sColName   output column name
    * @param colType    configured logical column type
    * @param expression raw expression text from the config
    * @param dataFrame  source DataFrame supplying existing columns
    * @return the resulting column, aliased to sColName
    * @throws Exception when a UDF argument is neither a known column nor a literal
    */
  def doExpression(sColName: String,
                   colType: String,
                   expression: String,
                   dataFrame: DataFrame): Column = {
    // Strip all spaces, then split into (udf name, argument list).
    val (udfName, argNameList) = UdfUtils.extractUDF(expression.replaceAll(" ", ""))
    if (udfName != null) {
      // A zero-arity call such as current_timestamp() is parsed as a single
      // empty argument — handle it once, up front, instead of inside the loop.
      if (argNameList.length == 1 && argNameList.head.equals("")) {
        callUDF(udfName) as sColName
      } else {
        // Literal-detection patterns: numbers, 'single'- and "double"-quoted strings.
        val isNumeric: Pattern = Pattern.compile("^-?[0-9]+([.]{1}[0-9]+){0,1}$")
        val isStrV1: Pattern = Pattern.compile("'.*'")
        val isStrV2: Pattern = Pattern.compile("\".*\"")

        // Lower-cased source columns: guards against case differences between
        // the config and the underlying engine.
        val lowerCols = dataFrame.columns.map(_.toLowerCase)

        val colArr = new Array[Column](argNameList.size)
        for (j <- 0 until argNameList.size) {
          val arg = argNameList(j)
          if (lowerCols.contains(arg.toLowerCase)) {
            // Argument is an existing column, e.g. concat(col1,col2).
            colArr(j) = dataFrame.col(arg)
          } else if (isNumeric.matcher(arg).matches()) {
            // Numeric constant, e.g. concat(col1,1).
            colArr(j) = lit(arg).cast(StringType)
          } else if (isStrV1.matcher(arg).matches() || isStrV2.matcher(arg).matches()) {
            // Quoted string constant, e.g. concat(col1,'str') / concat(col1,"str").
            colArr(j) = lit(arg.substring(1, arg.length - 1)).cast(StringType)
          } else {
            throw new Exception(s"UDF函数中包含未知的列！udf=$udfName,columnName=$arg")
          }
        }
        callUDF(udfName, colArr: _*) as sColName
      }
    } else { // not a UDF call: constant handling
      SparkJobHelper.expressionConstantConvert(sColName, colType, expression, argNameList)
    }
  }

  /**
    * Builds a typed constant column (compatibility path for the redis source).
    *
    * @param sColName output column name
    * @param colType  logical type name, mapped to a Spark SQL type
    * @param value    literal value (may be null)
    * @return a literal column cast to the mapped type, aliased to sColName
    */
  def addNewColumn(sColName: String, colType: String, value: String): Column = {
    val realType = colType.toUpperCase match {
      case ColumnType.STRING => StringType
      case ColumnType.NUMBER => LongType
      case ColumnType.DATE => DateType
      case ColumnType.DECIMAL => DoubleType
      case ColumnType.TIME => StringType
      case ColumnType.DATETIME => TimestampType
      case _ => NullType
    }
    lit(value).cast(realType) as sColName
  }

  /**
    * Assembles the output DataFrame from every configured field.
    *
    * SOURCE-tagged fields keep or transform an existing column; TARGET-tagged
    * fields either evaluate an expression, pass through an existing column of
    * the same name (redis source case), or materialize as a typed null column.
    *
    * @param kc        job context (currently unused; kept for interface stability)
    * @param dataFrame upstream DataFrame
    * @param map       parsed module configuration
    * @return the projected DataFrame
    * @throws Exception on an unrecognized field tag
    */
  def buildDataFrame(kc: KhaosContext,
                     dataFrame: DataFrame,
                     map: NewFilterAndMapInfo): DataFrame = {
    val opSourceTables = map.extract_fields
    val allColArr = new Array[Column](opSourceTables.size)
    for (i <- 0 until opSourceTables.size) {
      val optable = opSourceTables(i)
      val sColName = optable.field
      val colType = optable.data_type
      val tag = optable.tag.trim
      val expression = optable.option

      if (tag.equals(SchedulerConstants.SOURCE_TAG)) { // existing field
        allColArr(i) =
          if (!expression.trim.isEmpty) doExpression(sColName, colType, expression, dataFrame)
          else dataFrame.col(sColName) // empty expression: pass through untouched
      } else if (tag.equals(SchedulerConstants.TARGET_TAG)) { // newly added field
        if (!expression.trim.isEmpty) {
          allColArr(i) = doExpression(sColName, colType, expression, dataFrame)
        } else {
          // If the source DF already has this column it came from a redis
          // source — pass it through; otherwise create a typed null column.
          allColArr(i) =
            if (dataFrame.schema.fieldNames.contains(sColName)) dataFrame.col(sColName)
            else addNewColumn(sColName, colType, null)
        }
      } else {
        throw new Exception(s"map算子包含未知标识的列！tag=$tag")
      }
    }
    dataFrame.select(allColArr: _*)
  }
}
